1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *	Eric Anholt <eric@anholt.net>
25  */
26 
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35 
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_damage_helper.h>
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_edid.h>
42 #include <drm/drm_fourcc.h>
43 #include <drm/drm_plane_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/drm_rect.h>
46 
47 #include "display/intel_audio.h"
48 #include "display/intel_crt.h"
49 #include "display/intel_ddi.h"
50 #include "display/intel_display_debugfs.h"
51 #include "display/intel_dp.h"
52 #include "display/intel_dp_mst.h"
53 #include "display/intel_dpll.h"
54 #include "display/intel_dpll_mgr.h"
55 #include "display/intel_dsi.h"
56 #include "display/intel_dvo.h"
57 #include "display/intel_fb.h"
58 #include "display/intel_gmbus.h"
59 #include "display/intel_hdmi.h"
60 #include "display/intel_lvds.h"
61 #include "display/intel_sdvo.h"
62 #include "display/intel_snps_phy.h"
63 #include "display/intel_tv.h"
64 #include "display/intel_vdsc.h"
65 #include "display/intel_vrr.h"
66 
67 #include "gem/i915_gem_lmem.h"
68 #include "gem/i915_gem_object.h"
69 
70 #include "gt/intel_rps.h"
71 #include "gt/gen8_ppgtt.h"
72 
73 #include "g4x_dp.h"
74 #include "g4x_hdmi.h"
75 #include "i915_drv.h"
76 #include "intel_acpi.h"
77 #include "intel_atomic.h"
78 #include "intel_atomic_plane.h"
79 #include "intel_bw.h"
80 #include "intel_cdclk.h"
81 #include "intel_color.h"
82 #include "intel_crtc.h"
83 #include "intel_de.h"
84 #include "intel_display_types.h"
85 #include "intel_dmc.h"
86 #include "intel_dp_link_training.h"
87 #include "intel_fbc.h"
88 #include "intel_fdi.h"
89 #include "intel_fbdev.h"
90 #include "intel_fifo_underrun.h"
91 #include "intel_frontbuffer.h"
92 #include "intel_hdcp.h"
93 #include "intel_hotplug.h"
94 #include "intel_overlay.h"
95 #include "intel_pipe_crc.h"
96 #include "intel_pm.h"
97 #include "intel_pps.h"
98 #include "intel_psr.h"
99 #include "intel_quirks.h"
100 #include "intel_sideband.h"
101 #include "intel_sprite.h"
102 #include "intel_tc.h"
103 #include "intel_vga.h"
104 #include "i9xx_plane.h"
105 #include "skl_scaler.h"
106 #include "skl_universal_plane.h"
107 
108 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
109 				struct intel_crtc_state *pipe_config);
110 static void ilk_pch_clock_get(struct intel_crtc *crtc,
111 			      struct intel_crtc_state *pipe_config);
112 
113 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
114 				  struct drm_i915_gem_object *obj,
115 				  struct drm_mode_fb_cmd2 *mode_cmd);
116 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
117 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
118 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
119 					 const struct intel_link_m_n *m_n,
120 					 const struct intel_link_m_n *m2_n2);
121 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
122 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
123 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
124 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
125 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
126 static void intel_modeset_setup_hw_state(struct drm_device *dev,
127 					 struct drm_modeset_acquire_ctx *ctx);
128 
129 struct i915_dpt {
130 	struct i915_address_space vm;
131 
132 	struct drm_i915_gem_object *obj;
133 	struct i915_vma *vma;
134 	void __iomem *iomem;
135 };
136 
137 #define i915_is_dpt(vm) ((vm)->is_dpt)
138 
139 static inline struct i915_dpt *
140 i915_vm_to_dpt(struct i915_address_space *vm)
141 {
142 	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
143 	GEM_BUG_ON(!i915_is_dpt(vm));
144 	return container_of(vm, struct i915_dpt, vm);
145 }
146 
147 #define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
148 
149 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
150 {
151 	writeq(pte, addr);
152 }
153 
154 static void dpt_insert_page(struct i915_address_space *vm,
155 			    dma_addr_t addr,
156 			    u64 offset,
157 			    enum i915_cache_level level,
158 			    u32 flags)
159 {
160 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
161 	gen8_pte_t __iomem *base = dpt->iomem;
162 
163 	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
164 		     vm->pte_encode(addr, level, flags));
165 }
166 
167 static void dpt_insert_entries(struct i915_address_space *vm,
168 			       struct i915_vma *vma,
169 			       enum i915_cache_level level,
170 			       u32 flags)
171 {
172 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
173 	gen8_pte_t __iomem *base = dpt->iomem;
174 	const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
175 	struct sgt_iter sgt_iter;
176 	dma_addr_t addr;
177 	int i;
178 
179 	/*
180 	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
181 	 * not to allow the user to override access to a read only page.
182 	 */
183 
184 	i = vma->node.start / I915_GTT_PAGE_SIZE;
185 	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
186 		gen8_set_pte(&base[i++], pte_encode | addr);
187 }
188 
189 static void dpt_clear_range(struct i915_address_space *vm,
190 			    u64 start, u64 length)
191 {
192 }
193 
194 static void dpt_bind_vma(struct i915_address_space *vm,
195 			 struct i915_vm_pt_stash *stash,
196 			 struct i915_vma *vma,
197 			 enum i915_cache_level cache_level,
198 			 u32 flags)
199 {
200 	struct drm_i915_gem_object *obj = vma->obj;
201 	u32 pte_flags;
202 
203 	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
204 	pte_flags = 0;
205 	if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
206 		pte_flags |= PTE_READ_ONLY;
207 	if (i915_gem_object_is_lmem(obj))
208 		pte_flags |= PTE_LM;
209 
210 	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
211 
212 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
213 
214 	/*
215 	 * Without aliasing PPGTT there's no difference between
216 	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
217 	 * upgrade to both bound if we bind either to avoid double-binding.
218 	 */
219 	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
220 }
221 
222 static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
223 {
224 	vm->clear_range(vm, vma->node.start, vma->size);
225 }
226 
227 static void dpt_cleanup(struct i915_address_space *vm)
228 {
229 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
230 
231 	i915_gem_object_put(dpt->obj);
232 }
233 
234 static struct i915_address_space *
235 intel_dpt_create(struct intel_framebuffer *fb)
236 {
237 	struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
238 	struct drm_i915_private *i915 = to_i915(obj->dev);
239 	struct drm_i915_gem_object *dpt_obj;
240 	struct i915_address_space *vm;
241 	struct i915_dpt *dpt;
242 	size_t size;
243 	int ret;
244 
245 	if (intel_fb_needs_pot_stride_remap(fb))
246 		size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
247 	else
248 		size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
249 
250 	size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
251 
252 	if (HAS_LMEM(i915))
253 		dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
254 	else
255 		dpt_obj = i915_gem_object_create_stolen(i915, size);
256 	if (IS_ERR(dpt_obj))
257 		return ERR_CAST(dpt_obj);
258 
259 	ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
260 	if (ret) {
261 		i915_gem_object_put(dpt_obj);
262 		return ERR_PTR(ret);
263 	}
264 
265 	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
266 	if (!dpt) {
267 		i915_gem_object_put(dpt_obj);
268 		return ERR_PTR(-ENOMEM);
269 	}
270 
271 	vm = &dpt->vm;
272 
273 	vm->gt = &i915->gt;
274 	vm->i915 = i915;
275 	vm->dma = i915->drm.dev;
276 	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
277 	vm->is_dpt = true;
278 
279 	i915_address_space_init(vm, VM_CLASS_DPT);
280 
281 	vm->insert_page = dpt_insert_page;
282 	vm->clear_range = dpt_clear_range;
283 	vm->insert_entries = dpt_insert_entries;
284 	vm->cleanup = dpt_cleanup;
285 
286 	vm->vma_ops.bind_vma    = dpt_bind_vma;
287 	vm->vma_ops.unbind_vma  = dpt_unbind_vma;
288 	vm->vma_ops.set_pages   = ggtt_set_pages;
289 	vm->vma_ops.clear_pages = clear_pages;
290 
291 	vm->pte_encode = gen8_ggtt_pte_encode;
292 
293 	dpt->obj = dpt_obj;
294 
295 	return &dpt->vm;
296 }
297 
298 static void intel_dpt_destroy(struct i915_address_space *vm)
299 {
300 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
301 
302 	i915_vm_close(&dpt->vm);
303 }
304 
305 /* returns HPLL frequency in kHz */
306 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
307 {
308 	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
309 
310 	/* Obtain SKU information */
311 	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
312 		CCK_FUSE_HPLL_FREQ_MASK;
313 
314 	return vco_freq[hpll_freq] * 1000;
315 }
316 
317 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
318 		      const char *name, u32 reg, int ref_freq)
319 {
320 	u32 val;
321 	int divider;
322 
323 	val = vlv_cck_read(dev_priv, reg);
324 	divider = val & CCK_FREQUENCY_VALUES;
325 
326 	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
327 		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
328 		 "%s change in progress\n", name);
329 
330 	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
331 }
332 
333 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
334 			   const char *name, u32 reg)
335 {
336 	int hpll;
337 
338 	vlv_cck_get(dev_priv);
339 
340 	if (dev_priv->hpll_freq == 0)
341 		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
342 
343 	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
344 
345 	vlv_cck_put(dev_priv);
346 
347 	return hpll;
348 }
349 
350 static void intel_update_czclk(struct drm_i915_private *dev_priv)
351 {
352 	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
353 		return;
354 
355 	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
356 						      CCK_CZ_CLOCK_CONTROL);
357 
358 	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
359 		dev_priv->czclk_freq);
360 }
361 
362 /* WA Display #0827: Gen9:all */
363 static void
364 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
365 {
366 	if (enable)
367 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
368 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
369 	else
370 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
371 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
372 }
373 
374 /* Wa_2006604312:icl,ehl */
375 static void
376 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
377 		       bool enable)
378 {
379 	if (enable)
380 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
381 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
382 	else
383 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
384 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
385 }
386 
387 static bool
388 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
389 {
390 	return crtc_state->master_transcoder != INVALID_TRANSCODER;
391 }
392 
393 static bool
394 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
395 {
396 	return crtc_state->sync_mode_slaves_mask != 0;
397 }
398 
399 bool
400 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
401 {
402 	return is_trans_port_sync_master(crtc_state) ||
403 		is_trans_port_sync_slave(crtc_state);
404 }
405 
406 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
407 				    enum pipe pipe)
408 {
409 	i915_reg_t reg = PIPEDSL(pipe);
410 	u32 line1, line2;
411 	u32 line_mask;
412 
413 	if (DISPLAY_VER(dev_priv) == 2)
414 		line_mask = DSL_LINEMASK_GEN2;
415 	else
416 		line_mask = DSL_LINEMASK_GEN3;
417 
418 	line1 = intel_de_read(dev_priv, reg) & line_mask;
419 	msleep(5);
420 	line2 = intel_de_read(dev_priv, reg) & line_mask;
421 
422 	return line1 != line2;
423 }
424 
425 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
426 {
427 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
428 	enum pipe pipe = crtc->pipe;
429 
430 	/* Wait for the display line to settle/start moving */
431 	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
432 		drm_err(&dev_priv->drm,
433 			"pipe %c scanline %s wait timed out\n",
434 			pipe_name(pipe), onoff(state));
435 }
436 
437 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
438 {
439 	wait_for_pipe_scanline_moving(crtc, false);
440 }
441 
442 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
443 {
444 	wait_for_pipe_scanline_moving(crtc, true);
445 }
446 
447 static void
448 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
449 {
450 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
451 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
452 
453 	if (DISPLAY_VER(dev_priv) >= 4) {
454 		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
455 		i915_reg_t reg = PIPECONF(cpu_transcoder);
456 
457 		/* Wait for the Pipe State to go off */
458 		if (intel_de_wait_for_clear(dev_priv, reg,
459 					    I965_PIPECONF_ACTIVE, 100))
460 			drm_WARN(&dev_priv->drm, 1,
461 				 "pipe_off wait timed out\n");
462 	} else {
463 		intel_wait_for_pipe_scanline_stopped(crtc);
464 	}
465 }
466 
467 /* Only for pre-ILK configs */
468 void assert_pll(struct drm_i915_private *dev_priv,
469 		enum pipe pipe, bool state)
470 {
471 	u32 val;
472 	bool cur_state;
473 
474 	val = intel_de_read(dev_priv, DPLL(pipe));
475 	cur_state = !!(val & DPLL_VCO_ENABLE);
476 	I915_STATE_WARN(cur_state != state,
477 	     "PLL state assertion failure (expected %s, current %s)\n",
478 			onoff(state), onoff(cur_state));
479 }
480 
481 /* XXX: the dsi pll is shared between MIPI DSI ports */
482 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
483 {
484 	u32 val;
485 	bool cur_state;
486 
487 	vlv_cck_get(dev_priv);
488 	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
489 	vlv_cck_put(dev_priv);
490 
491 	cur_state = val & DSI_PLL_VCO_EN;
492 	I915_STATE_WARN(cur_state != state,
493 	     "DSI PLL state assertion failure (expected %s, current %s)\n",
494 			onoff(state), onoff(cur_state));
495 }
496 
497 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
498 			  enum pipe pipe, bool state)
499 {
500 	bool cur_state;
501 
502 	if (HAS_DDI(dev_priv)) {
503 		/*
504 		 * DDI does not have a specific FDI_TX register.
505 		 *
506 		 * FDI is never fed from EDP transcoder
507 		 * so pipe->transcoder cast is fine here.
508 		 */
509 		enum transcoder cpu_transcoder = (enum transcoder)pipe;
510 		u32 val = intel_de_read(dev_priv,
511 					TRANS_DDI_FUNC_CTL(cpu_transcoder));
512 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
513 	} else {
514 		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
515 		cur_state = !!(val & FDI_TX_ENABLE);
516 	}
517 	I915_STATE_WARN(cur_state != state,
518 	     "FDI TX state assertion failure (expected %s, current %s)\n",
519 			onoff(state), onoff(cur_state));
520 }
521 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
522 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
523 
524 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
525 			  enum pipe pipe, bool state)
526 {
527 	u32 val;
528 	bool cur_state;
529 
530 	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
531 	cur_state = !!(val & FDI_RX_ENABLE);
532 	I915_STATE_WARN(cur_state != state,
533 	     "FDI RX state assertion failure (expected %s, current %s)\n",
534 			onoff(state), onoff(cur_state));
535 }
536 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
537 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
538 
539 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
540 				      enum pipe pipe)
541 {
542 	u32 val;
543 
544 	/* ILK FDI PLL is always enabled */
545 	if (IS_IRONLAKE(dev_priv))
546 		return;
547 
548 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
549 	if (HAS_DDI(dev_priv))
550 		return;
551 
552 	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
553 	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
554 }
555 
556 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
557 		       enum pipe pipe, bool state)
558 {
559 	u32 val;
560 	bool cur_state;
561 
562 	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
563 	cur_state = !!(val & FDI_RX_PLL_ENABLE);
564 	I915_STATE_WARN(cur_state != state,
565 	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
566 			onoff(state), onoff(cur_state));
567 }
568 
569 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
570 {
571 	i915_reg_t pp_reg;
572 	u32 val;
573 	enum pipe panel_pipe = INVALID_PIPE;
574 	bool locked = true;
575 
576 	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
577 		return;
578 
579 	if (HAS_PCH_SPLIT(dev_priv)) {
580 		u32 port_sel;
581 
582 		pp_reg = PP_CONTROL(0);
583 		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
584 
585 		switch (port_sel) {
586 		case PANEL_PORT_SELECT_LVDS:
587 			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
588 			break;
589 		case PANEL_PORT_SELECT_DPA:
590 			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
591 			break;
592 		case PANEL_PORT_SELECT_DPC:
593 			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
594 			break;
595 		case PANEL_PORT_SELECT_DPD:
596 			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
597 			break;
598 		default:
599 			MISSING_CASE(port_sel);
600 			break;
601 		}
602 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
603 		/* presumably write lock depends on pipe, not port select */
604 		pp_reg = PP_CONTROL(pipe);
605 		panel_pipe = pipe;
606 	} else {
607 		u32 port_sel;
608 
609 		pp_reg = PP_CONTROL(0);
610 		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
611 
612 		drm_WARN_ON(&dev_priv->drm,
613 			    port_sel != PANEL_PORT_SELECT_LVDS);
614 		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
615 	}
616 
617 	val = intel_de_read(dev_priv, pp_reg);
618 	if (!(val & PANEL_POWER_ON) ||
619 	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
620 		locked = false;
621 
622 	I915_STATE_WARN(panel_pipe == pipe && locked,
623 	     "panel assertion failure, pipe %c regs locked\n",
624 	     pipe_name(pipe));
625 }
626 
627 void assert_pipe(struct drm_i915_private *dev_priv,
628 		 enum transcoder cpu_transcoder, bool state)
629 {
630 	bool cur_state;
631 	enum intel_display_power_domain power_domain;
632 	intel_wakeref_t wakeref;
633 
634 	/* we keep both pipes enabled on 830 */
635 	if (IS_I830(dev_priv))
636 		state = true;
637 
638 	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
639 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
640 	if (wakeref) {
641 		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
642 		cur_state = !!(val & PIPECONF_ENABLE);
643 
644 		intel_display_power_put(dev_priv, power_domain, wakeref);
645 	} else {
646 		cur_state = false;
647 	}
648 
649 	I915_STATE_WARN(cur_state != state,
650 			"transcoder %s assertion failure (expected %s, current %s)\n",
651 			transcoder_name(cpu_transcoder),
652 			onoff(state), onoff(cur_state));
653 }
654 
655 static void assert_plane(struct intel_plane *plane, bool state)
656 {
657 	enum pipe pipe;
658 	bool cur_state;
659 
660 	cur_state = plane->get_hw_state(plane, &pipe);
661 
662 	I915_STATE_WARN(cur_state != state,
663 			"%s assertion failure (expected %s, current %s)\n",
664 			plane->base.name, onoff(state), onoff(cur_state));
665 }
666 
667 #define assert_plane_enabled(p) assert_plane(p, true)
668 #define assert_plane_disabled(p) assert_plane(p, false)
669 
670 static void assert_planes_disabled(struct intel_crtc *crtc)
671 {
672 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
673 	struct intel_plane *plane;
674 
675 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
676 		assert_plane_disabled(plane);
677 }
678 
679 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
680 				    enum pipe pipe)
681 {
682 	u32 val;
683 	bool enabled;
684 
685 	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
686 	enabled = !!(val & TRANS_ENABLE);
687 	I915_STATE_WARN(enabled,
688 	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
689 	     pipe_name(pipe));
690 }
691 
692 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
693 				   enum pipe pipe, enum port port,
694 				   i915_reg_t dp_reg)
695 {
696 	enum pipe port_pipe;
697 	bool state;
698 
699 	state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
700 
701 	I915_STATE_WARN(state && port_pipe == pipe,
702 			"PCH DP %c enabled on transcoder %c, should be disabled\n",
703 			port_name(port), pipe_name(pipe));
704 
705 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
706 			"IBX PCH DP %c still using transcoder B\n",
707 			port_name(port));
708 }
709 
710 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
711 				     enum pipe pipe, enum port port,
712 				     i915_reg_t hdmi_reg)
713 {
714 	enum pipe port_pipe;
715 	bool state;
716 
717 	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
718 
719 	I915_STATE_WARN(state && port_pipe == pipe,
720 			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
721 			port_name(port), pipe_name(pipe));
722 
723 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
724 			"IBX PCH HDMI %c still using transcoder B\n",
725 			port_name(port));
726 }
727 
728 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
729 				      enum pipe pipe)
730 {
731 	enum pipe port_pipe;
732 
733 	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
734 	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
735 	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
736 
737 	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
738 			port_pipe == pipe,
739 			"PCH VGA enabled on transcoder %c, should be disabled\n",
740 			pipe_name(pipe));
741 
742 	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
743 			port_pipe == pipe,
744 			"PCH LVDS enabled on transcoder %c, should be disabled\n",
745 			pipe_name(pipe));
746 
747 	/* PCH SDVOB multiplex with HDMIB */
748 	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
749 	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
750 	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
751 }
752 
753 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
754 			 struct intel_digital_port *dig_port,
755 			 unsigned int expected_mask)
756 {
757 	u32 port_mask;
758 	i915_reg_t dpll_reg;
759 
760 	switch (dig_port->base.port) {
761 	case PORT_B:
762 		port_mask = DPLL_PORTB_READY_MASK;
763 		dpll_reg = DPLL(0);
764 		break;
765 	case PORT_C:
766 		port_mask = DPLL_PORTC_READY_MASK;
767 		dpll_reg = DPLL(0);
768 		expected_mask <<= 4;
769 		break;
770 	case PORT_D:
771 		port_mask = DPLL_PORTD_READY_MASK;
772 		dpll_reg = DPIO_PHY_STATUS;
773 		break;
774 	default:
775 		BUG();
776 	}
777 
778 	if (intel_de_wait_for_register(dev_priv, dpll_reg,
779 				       port_mask, expected_mask, 1000))
780 		drm_WARN(&dev_priv->drm, 1,
781 			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
782 			 dig_port->base.base.base.id, dig_port->base.base.name,
783 			 intel_de_read(dev_priv, dpll_reg) & port_mask,
784 			 expected_mask);
785 }
786 
787 static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
788 {
789 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
790 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
791 	enum pipe pipe = crtc->pipe;
792 	i915_reg_t reg;
793 	u32 val, pipeconf_val;
794 
795 	/* Make sure PCH DPLL is enabled */
796 	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
797 
798 	/* FDI must be feeding us bits for PCH ports */
799 	assert_fdi_tx_enabled(dev_priv, pipe);
800 	assert_fdi_rx_enabled(dev_priv, pipe);
801 
802 	if (HAS_PCH_CPT(dev_priv)) {
803 		reg = TRANS_CHICKEN2(pipe);
804 		val = intel_de_read(dev_priv, reg);
805 		/*
806 		 * Workaround: Set the timing override bit
807 		 * before enabling the pch transcoder.
808 		 */
809 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
810 		/* Configure frame start delay to match the CPU */
811 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
812 		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
813 		intel_de_write(dev_priv, reg, val);
814 	}
815 
816 	reg = PCH_TRANSCONF(pipe);
817 	val = intel_de_read(dev_priv, reg);
818 	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
819 
820 	if (HAS_PCH_IBX(dev_priv)) {
821 		/* Configure frame start delay to match the CPU */
822 		val &= ~TRANS_FRAME_START_DELAY_MASK;
823 		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
824 
825 		/*
826 		 * Make the BPC in transcoder be consistent with
827 		 * that in pipeconf reg. For HDMI we must use 8bpc
828 		 * here for both 8bpc and 12bpc.
829 		 */
830 		val &= ~PIPECONF_BPC_MASK;
831 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
832 			val |= PIPECONF_8BPC;
833 		else
834 			val |= pipeconf_val & PIPECONF_BPC_MASK;
835 	}
836 
837 	val &= ~TRANS_INTERLACE_MASK;
838 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
839 		if (HAS_PCH_IBX(dev_priv) &&
840 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
841 			val |= TRANS_LEGACY_INTERLACED_ILK;
842 		else
843 			val |= TRANS_INTERLACED;
844 	} else {
845 		val |= TRANS_PROGRESSIVE;
846 	}
847 
848 	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
849 	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
850 		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
851 			pipe_name(pipe));
852 }
853 
854 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
855 				      enum transcoder cpu_transcoder)
856 {
857 	u32 val, pipeconf_val;
858 
859 	/* FDI must be feeding us bits for PCH ports */
860 	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
861 	assert_fdi_rx_enabled(dev_priv, PIPE_A);
862 
863 	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
864 	/* Workaround: set timing override bit. */
865 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
866 	/* Configure frame start delay to match the CPU */
867 	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
868 	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
869 	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
870 
871 	val = TRANS_ENABLE;
872 	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
873 
874 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
875 	    PIPECONF_INTERLACED_ILK)
876 		val |= TRANS_INTERLACED;
877 	else
878 		val |= TRANS_PROGRESSIVE;
879 
880 	intel_de_write(dev_priv, LPT_TRANSCONF, val);
881 	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
882 				  TRANS_STATE_ENABLE, 100))
883 		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
884 }
885 
886 static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
887 				       enum pipe pipe)
888 {
889 	i915_reg_t reg;
890 	u32 val;
891 
892 	/* FDI relies on the transcoder */
893 	assert_fdi_tx_disabled(dev_priv, pipe);
894 	assert_fdi_rx_disabled(dev_priv, pipe);
895 
896 	/* Ports must be off as well */
897 	assert_pch_ports_disabled(dev_priv, pipe);
898 
899 	reg = PCH_TRANSCONF(pipe);
900 	val = intel_de_read(dev_priv, reg);
901 	val &= ~TRANS_ENABLE;
902 	intel_de_write(dev_priv, reg, val);
903 	/* wait for PCH transcoder off, transcoder state */
904 	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
905 		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
906 			pipe_name(pipe));
907 
908 	if (HAS_PCH_CPT(dev_priv)) {
909 		/* Workaround: Clear the timing override chicken bit again. */
910 		reg = TRANS_CHICKEN2(pipe);
911 		val = intel_de_read(dev_priv, reg);
912 		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
913 		intel_de_write(dev_priv, reg, val);
914 	}
915 }
916 
917 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
918 {
919 	u32 val;
920 
921 	val = intel_de_read(dev_priv, LPT_TRANSCONF);
922 	val &= ~TRANS_ENABLE;
923 	intel_de_write(dev_priv, LPT_TRANSCONF, val);
924 	/* wait for PCH transcoder off, transcoder state */
925 	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
926 				    TRANS_STATE_ENABLE, 50))
927 		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
928 
929 	/* Workaround: clear timing override bit. */
930 	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
931 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
932 	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
933 }
934 
935 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
936 {
937 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
938 
939 	if (HAS_PCH_LPT(dev_priv))
940 		return PIPE_A;
941 	else
942 		return crtc->pipe;
943 }
944 
945 void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
946 {
947 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
948 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
949 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
950 	enum pipe pipe = crtc->pipe;
951 	i915_reg_t reg;
952 	u32 val;
953 
954 	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
955 
956 	assert_planes_disabled(crtc);
957 
958 	/*
959 	 * A pipe without a PLL won't actually be able to drive bits from
960 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
961 	 * need the check.
962 	 */
963 	if (HAS_GMCH(dev_priv)) {
964 		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
965 			assert_dsi_pll_enabled(dev_priv);
966 		else
967 			assert_pll_enabled(dev_priv, pipe);
968 	} else {
969 		if (new_crtc_state->has_pch_encoder) {
970 			/* if driving the PCH, we need FDI enabled */
971 			assert_fdi_rx_pll_enabled(dev_priv,
972 						  intel_crtc_pch_transcoder(crtc));
973 			assert_fdi_tx_pll_enabled(dev_priv,
974 						  (enum pipe) cpu_transcoder);
975 		}
976 		/* FIXME: assert CPU port conditions for SNB+ */
977 	}
978 
979 	/* Wa_22012358565:adl-p */
980 	if (DISPLAY_VER(dev_priv) == 13)
981 		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
982 			     0, PIPE_ARB_USE_PROG_SLOTS);
983 
984 	reg = PIPECONF(cpu_transcoder);
985 	val = intel_de_read(dev_priv, reg);
986 	if (val & PIPECONF_ENABLE) {
987 		/* we keep both pipes enabled on 830 */
988 		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
989 		return;
990 	}
991 
992 	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
993 	intel_de_posting_read(dev_priv, reg);
994 
995 	/*
996 	 * Until the pipe starts PIPEDSL reads will return a stale value,
997 	 * which causes an apparent vblank timestamp jump when PIPEDSL
998 	 * resets to its proper value. That also messes up the frame count
999 	 * when it's derived from the timestamps. So let's wait for the
1000 	 * pipe to start properly before we call drm_crtc_vblank_on()
1001 	 */
1002 	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1003 		intel_wait_for_pipe_scanline_moving(crtc);
1004 }
1005 
1006 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1007 {
1008 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1009 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1010 	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1011 	enum pipe pipe = crtc->pipe;
1012 	i915_reg_t reg;
1013 	u32 val;
1014 
1015 	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
1016 
1017 	/*
1018 	 * Make sure planes won't keep trying to pump pixels to us,
1019 	 * or we might hang the display.
1020 	 */
1021 	assert_planes_disabled(crtc);
1022 
1023 	reg = PIPECONF(cpu_transcoder);
1024 	val = intel_de_read(dev_priv, reg);
1025 	if ((val & PIPECONF_ENABLE) == 0)
1026 		return;
1027 
1028 	/*
1029 	 * Double wide has implications for planes
1030 	 * so best keep it disabled when not needed.
1031 	 */
1032 	if (old_crtc_state->double_wide)
1033 		val &= ~PIPECONF_DOUBLE_WIDE;
1034 
1035 	/* Don't disable pipe or pipe PLLs if needed */
1036 	if (!IS_I830(dev_priv))
1037 		val &= ~PIPECONF_ENABLE;
1038 
1039 	if (DISPLAY_VER(dev_priv) >= 12)
1040 		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
1041 			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
1042 
1043 	intel_de_write(dev_priv, reg, val);
1044 	if ((val & PIPECONF_ENABLE) == 0)
1045 		intel_wait_for_pipe_off(old_crtc_state);
1046 }
1047 
1048 bool
1049 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
1050 				    u64 modifier)
1051 {
1052 	return info->is_yuv &&
1053 	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
1054 }
1055 
1056 unsigned int
1057 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1058 {
1059 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
1060 	unsigned int cpp = fb->format->cpp[color_plane];
1061 
1062 	switch (fb->modifier) {
1063 	case DRM_FORMAT_MOD_LINEAR:
1064 		return intel_tile_size(dev_priv);
1065 	case I915_FORMAT_MOD_X_TILED:
1066 		if (DISPLAY_VER(dev_priv) == 2)
1067 			return 128;
1068 		else
1069 			return 512;
1070 	case I915_FORMAT_MOD_Y_TILED_CCS:
1071 		if (is_ccs_plane(fb, color_plane))
1072 			return 128;
1073 		fallthrough;
1074 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1075 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1076 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1077 		if (is_ccs_plane(fb, color_plane))
1078 			return 64;
1079 		fallthrough;
1080 	case I915_FORMAT_MOD_Y_TILED:
1081 		if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
1082 			return 128;
1083 		else
1084 			return 512;
1085 	case I915_FORMAT_MOD_Yf_TILED_CCS:
1086 		if (is_ccs_plane(fb, color_plane))
1087 			return 128;
1088 		fallthrough;
1089 	case I915_FORMAT_MOD_Yf_TILED:
1090 		switch (cpp) {
1091 		case 1:
1092 			return 64;
1093 		case 2:
1094 		case 4:
1095 			return 128;
1096 		case 8:
1097 		case 16:
1098 			return 256;
1099 		default:
1100 			MISSING_CASE(cpp);
1101 			return cpp;
1102 		}
1103 		break;
1104 	default:
1105 		MISSING_CASE(fb->modifier);
1106 		return cpp;
1107 	}
1108 }
1109 
1110 unsigned int
1111 intel_fb_align_height(const struct drm_framebuffer *fb,
1112 		      int color_plane, unsigned int height)
1113 {
1114 	unsigned int tile_height = intel_tile_height(fb, color_plane);
1115 
1116 	return ALIGN(height, tile_height);
1117 }
1118 
1119 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1120 {
1121 	unsigned int size = 0;
1122 	int i;
1123 
1124 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1125 		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
1126 
1127 	return size;
1128 }
1129 
1130 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1131 {
1132 	unsigned int size = 0;
1133 	int i;
1134 
1135 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1136 		size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
1137 
1138 	return size;
1139 }
1140 
1141 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
1142 {
1143 	if (DISPLAY_VER(dev_priv) >= 9)
1144 		return 256 * 1024;
1145 	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
1146 		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1147 		return 128 * 1024;
1148 	else if (DISPLAY_VER(dev_priv) >= 4)
1149 		return 4 * 1024;
1150 	else
1151 		return 0;
1152 }
1153 
1154 static bool has_async_flips(struct drm_i915_private *i915)
1155 {
1156 	return DISPLAY_VER(i915) >= 5;
1157 }
1158 
1159 unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
1160 				  int color_plane)
1161 {
1162 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
1163 
1164 	if (intel_fb_uses_dpt(fb))
1165 		return 512 * 4096;
1166 
1167 	/* AUX_DIST needs only 4K alignment */
1168 	if (is_ccs_plane(fb, color_plane))
1169 		return 4096;
1170 
1171 	if (is_semiplanar_uv_plane(fb, color_plane)) {
1172 		/*
1173 		 * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
1174 		 * alignment for linear UV planes on all platforms.
1175 		 */
1176 		if (DISPLAY_VER(dev_priv) >= 12) {
1177 			if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1178 				return intel_linear_alignment(dev_priv);
1179 
1180 			return intel_tile_row_size(fb, color_plane);
1181 		}
1182 
1183 		return 4096;
1184 	}
1185 
1186 	drm_WARN_ON(&dev_priv->drm, color_plane != 0);
1187 
1188 	switch (fb->modifier) {
1189 	case DRM_FORMAT_MOD_LINEAR:
1190 		return intel_linear_alignment(dev_priv);
1191 	case I915_FORMAT_MOD_X_TILED:
1192 		if (has_async_flips(dev_priv))
1193 			return 256 * 1024;
1194 		return 0;
1195 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1196 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1197 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1198 		return 16 * 1024;
1199 	case I915_FORMAT_MOD_Y_TILED_CCS:
1200 	case I915_FORMAT_MOD_Yf_TILED_CCS:
1201 	case I915_FORMAT_MOD_Y_TILED:
1202 	case I915_FORMAT_MOD_Yf_TILED:
1203 		return 1 * 1024 * 1024;
1204 	default:
1205 		MISSING_CASE(fb->modifier);
1206 		return 0;
1207 	}
1208 }
1209 
1210 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
1211 {
1212 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1213 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1214 
1215 	return DISPLAY_VER(dev_priv) < 4 ||
1216 		(plane->has_fbc &&
1217 		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
1218 }
1219 
1220 static struct i915_vma *
1221 intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
1222 		     const struct i915_ggtt_view *view,
1223 		     bool uses_fence,
1224 		     unsigned long *out_flags,
1225 		     struct i915_address_space *vm)
1226 {
1227 	struct drm_device *dev = fb->dev;
1228 	struct drm_i915_private *dev_priv = to_i915(dev);
1229 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1230 	struct i915_vma *vma;
1231 	u32 alignment;
1232 	int ret;
1233 
1234 	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
1235 		return ERR_PTR(-EINVAL);
1236 
1237 	alignment = 4096 * 512;
1238 
1239 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1240 
1241 	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
1242 	if (ret) {
1243 		vma = ERR_PTR(ret);
1244 		goto err;
1245 	}
1246 
1247 	vma = i915_vma_instance(obj, vm, view);
1248 	if (IS_ERR(vma))
1249 		goto err;
1250 
1251 	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
1252 		ret = i915_vma_unbind(vma);
1253 		if (ret) {
1254 			vma = ERR_PTR(ret);
1255 			goto err;
1256 		}
1257 	}
1258 
1259 	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
1260 	if (ret) {
1261 		vma = ERR_PTR(ret);
1262 		goto err;
1263 	}
1264 
1265 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
1266 
1267 	i915_gem_object_flush_if_display(obj);
1268 
1269 	i915_vma_get(vma);
1270 err:
1271 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1272 
1273 	return vma;
1274 }
1275 
1276 struct i915_vma *
1277 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1278 			   bool phys_cursor,
1279 			   const struct i915_ggtt_view *view,
1280 			   bool uses_fence,
1281 			   unsigned long *out_flags)
1282 {
1283 	struct drm_device *dev = fb->dev;
1284 	struct drm_i915_private *dev_priv = to_i915(dev);
1285 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1286 	intel_wakeref_t wakeref;
1287 	struct i915_gem_ww_ctx ww;
1288 	struct i915_vma *vma;
1289 	unsigned int pinctl;
1290 	u32 alignment;
1291 	int ret;
1292 
1293 	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
1294 		return ERR_PTR(-EINVAL);
1295 
1296 	if (phys_cursor)
1297 		alignment = intel_cursor_alignment(dev_priv);
1298 	else
1299 		alignment = intel_surf_alignment(fb, 0);
1300 	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
1301 		return ERR_PTR(-EINVAL);
1302 
1303 	/* Note that the w/a also requires 64 PTE of padding following the
1304 	 * bo. We currently fill all unused PTE with the shadow page and so
1305 	 * we should always have valid PTE following the scanout preventing
1306 	 * the VT-d warning.
1307 	 */
1308 	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
1309 		alignment = 256 * 1024;
1310 
1311 	/*
1312 	 * Global gtt pte registers are special registers which actually forward
1313 	 * writes to a chunk of system memory. Which means that there is no risk
1314 	 * that the register values disappear as soon as we call
1315 	 * intel_runtime_pm_put(), so it is correct to wrap only the
1316 	 * pin/unpin/fence and not more.
1317 	 */
1318 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1319 
1320 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1321 
1322 	/*
1323 	 * Valleyview is definitely limited to scanning out the first
1324 	 * 512MiB. Lets presume this behaviour was inherited from the
1325 	 * g4x display engine and that all earlier gen are similarly
1326 	 * limited. Testing suggests that it is a little more
1327 	 * complicated than this. For example, Cherryview appears quite
1328 	 * happy to scanout from anywhere within its global aperture.
1329 	 */
1330 	pinctl = 0;
1331 	if (HAS_GMCH(dev_priv))
1332 		pinctl |= PIN_MAPPABLE;
1333 
1334 	i915_gem_ww_ctx_init(&ww, true);
1335 retry:
1336 	ret = i915_gem_object_lock(obj, &ww);
1337 	if (!ret && phys_cursor)
1338 		ret = i915_gem_object_attach_phys(obj, alignment);
1339 	else if (!ret && HAS_LMEM(dev_priv))
1340 		ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
1341 	/* TODO: Do we need to sync when migration becomes async? */
1342 	if (!ret)
1343 		ret = i915_gem_object_pin_pages(obj);
1344 	if (ret)
1345 		goto err;
1346 
1347 	if (!ret) {
1348 		vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
1349 							   view, pinctl);
1350 		if (IS_ERR(vma)) {
1351 			ret = PTR_ERR(vma);
1352 			goto err_unpin;
1353 		}
1354 	}
1355 
1356 	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
1357 		/*
1358 		 * Install a fence for tiled scan-out. Pre-i965 always needs a
1359 		 * fence, whereas 965+ only requires a fence if using
1360 		 * framebuffer compression.  For simplicity, we always, when
1361 		 * possible, install a fence as the cost is not that onerous.
1362 		 *
1363 		 * If we fail to fence the tiled scanout, then either the
1364 		 * modeset will reject the change (which is highly unlikely as
1365 		 * the affected systems, all but one, do not have unmappable
1366 		 * space) or we will not be able to enable full powersaving
1367 		 * techniques (also likely not to apply due to various limits
1368 		 * FBC and the like impose on the size of the buffer, which
1369 		 * presumably we violated anyway with this unmappable buffer).
1370 		 * Anyway, it is presumably better to stumble onwards with
1371 		 * something and try to run the system in a "less than optimal"
1372 		 * mode that matches the user configuration.
1373 		 */
1374 		ret = i915_vma_pin_fence(vma);
1375 		if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
1376 			i915_vma_unpin(vma);
1377 			goto err_unpin;
1378 		}
1379 		ret = 0;
1380 
1381 		if (vma->fence)
1382 			*out_flags |= PLANE_HAS_FENCE;
1383 	}
1384 
1385 	i915_vma_get(vma);
1386 
1387 err_unpin:
1388 	i915_gem_object_unpin_pages(obj);
1389 err:
1390 	if (ret == -EDEADLK) {
1391 		ret = i915_gem_ww_ctx_backoff(&ww);
1392 		if (!ret)
1393 			goto retry;
1394 	}
1395 	i915_gem_ww_ctx_fini(&ww);
1396 	if (ret)
1397 		vma = ERR_PTR(ret);
1398 
1399 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1400 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1401 	return vma;
1402 }
1403 
1404 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
1405 {
1406 	if (flags & PLANE_HAS_FENCE)
1407 		i915_vma_unpin_fence(vma);
1408 	i915_vma_unpin(vma);
1409 	i915_vma_put(vma);
1410 }
1411 
1412 /*
1413  * Convert the x/y offsets into a linear offset.
1414  * Only valid with 0/180 degree rotation, which is fine since linear
1415  * offset is only used with linear buffers on pre-hsw and tiled buffers
1416  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1417  */
1418 u32 intel_fb_xy_to_linear(int x, int y,
1419 			  const struct intel_plane_state *state,
1420 			  int color_plane)
1421 {
1422 	const struct drm_framebuffer *fb = state->hw.fb;
1423 	unsigned int cpp = fb->format->cpp[color_plane];
1424 	unsigned int pitch = state->view.color_plane[color_plane].stride;
1425 
1426 	return y * pitch + x * cpp;
1427 }
1428 
1429 /*
1430  * Add the x/y offsets derived from fb->offsets[] to the user
1431  * specified plane src x/y offsets. The resulting x/y offsets
1432  * specify the start of scanout from the beginning of the gtt mapping.
1433  */
1434 void intel_add_fb_offsets(int *x, int *y,
1435 			  const struct intel_plane_state *state,
1436 			  int color_plane)
1437 
1438 {
1439 	*x += state->view.color_plane[color_plane].x;
1440 	*y += state->view.color_plane[color_plane].y;
1441 }
1442 
1443 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
1444 {
1445 	switch (fb_modifier) {
1446 	case I915_FORMAT_MOD_X_TILED:
1447 		return I915_TILING_X;
1448 	case I915_FORMAT_MOD_Y_TILED:
1449 	case I915_FORMAT_MOD_Y_TILED_CCS:
1450 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1451 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1452 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1453 		return I915_TILING_Y;
1454 	default:
1455 		return I915_TILING_NONE;
1456 	}
1457 }
1458 
1459 /*
1460  * From the Sky Lake PRM:
1461  * "The Color Control Surface (CCS) contains the compression status of
1462  *  the cache-line pairs. The compression state of the cache-line pair
1463  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
1464  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1465  *  cache-line-pairs. CCS is always Y tiled."
1466  *
1467  * Since cache line pairs refers to horizontally adjacent cache lines,
1468  * each cache line in the CCS corresponds to an area of 32x16 cache
1469  * lines on the main surface. Since each pixel is 4 bytes, this gives
1470  * us a ratio of one byte in the CCS for each 8x16 pixels in the
1471  * main surface.
1472  */
1473 static const struct drm_format_info skl_ccs_formats[] = {
1474 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1475 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1476 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1477 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1478 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1479 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1480 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1481 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1482 };
1483 
1484 /*
1485  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1486  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1487  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1488  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
1489  * the main surface.
1490  */
1491 static const struct drm_format_info gen12_ccs_formats[] = {
1492 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1493 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1494 	  .hsub = 1, .vsub = 1, },
1495 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1496 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1497 	  .hsub = 1, .vsub = 1, },
1498 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1499 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1500 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1501 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1502 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1503 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1504 	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
1505 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1506 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1507 	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
1508 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1509 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1510 	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
1511 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1512 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1513 	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
1514 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1515 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1516 	{ .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
1517 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1518 	  .hsub = 1, .vsub = 1, .is_yuv = true },
1519 	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
1520 	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
1521 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1522 	{ .format = DRM_FORMAT_P010, .num_planes = 4,
1523 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1524 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1525 	{ .format = DRM_FORMAT_P012, .num_planes = 4,
1526 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1527 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1528 	{ .format = DRM_FORMAT_P016, .num_planes = 4,
1529 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1530 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1531 };
1532 
1533 /*
1534  * Same as gen12_ccs_formats[] above, but with additional surface used
1535  * to pass Clear Color information in plane 2 with 64 bits of data.
1536  */
1537 static const struct drm_format_info gen12_ccs_cc_formats[] = {
1538 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
1539 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1540 	  .hsub = 1, .vsub = 1, },
1541 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
1542 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1543 	  .hsub = 1, .vsub = 1, },
1544 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
1545 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1546 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1547 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
1548 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1549 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1550 };
1551 
1552 static const struct drm_format_info *
1553 lookup_format_info(const struct drm_format_info formats[],
1554 		   int num_formats, u32 format)
1555 {
1556 	int i;
1557 
1558 	for (i = 0; i < num_formats; i++) {
1559 		if (formats[i].format == format)
1560 			return &formats[i];
1561 	}
1562 
1563 	return NULL;
1564 }
1565 
1566 static const struct drm_format_info *
1567 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1568 {
1569 	switch (cmd->modifier[0]) {
1570 	case I915_FORMAT_MOD_Y_TILED_CCS:
1571 	case I915_FORMAT_MOD_Yf_TILED_CCS:
1572 		return lookup_format_info(skl_ccs_formats,
1573 					  ARRAY_SIZE(skl_ccs_formats),
1574 					  cmd->pixel_format);
1575 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1576 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1577 		return lookup_format_info(gen12_ccs_formats,
1578 					  ARRAY_SIZE(gen12_ccs_formats),
1579 					  cmd->pixel_format);
1580 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1581 		return lookup_format_info(gen12_ccs_cc_formats,
1582 					  ARRAY_SIZE(gen12_ccs_cc_formats),
1583 					  cmd->pixel_format);
1584 	default:
1585 		return NULL;
1586 	}
1587 }
1588 
1589 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
1590 {
1591 	return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
1592 			    512) * 64;
1593 }
1594 
1595 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1596 			      u32 pixel_format, u64 modifier)
1597 {
1598 	struct intel_crtc *crtc;
1599 	struct intel_plane *plane;
1600 
1601 	if (!HAS_DISPLAY(dev_priv))
1602 		return 0;
1603 
1604 	/*
1605 	 * We assume the primary plane for pipe A has
1606 	 * the highest stride limits of them all,
1607 	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
1608 	 */
1609 	crtc = intel_get_first_crtc(dev_priv);
1610 	if (!crtc)
1611 		return 0;
1612 
1613 	plane = to_intel_plane(crtc->base.primary);
1614 
1615 	return plane->max_stride(plane, pixel_format, modifier,
1616 				 DRM_MODE_ROTATE_0);
1617 }
1618 
1619 static
1620 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1621 			u32 pixel_format, u64 modifier)
1622 {
1623 	/*
1624 	 * Arbitrary limit for gen4+ chosen to match the
1625 	 * render engine max stride.
1626 	 *
1627 	 * The new CCS hash mode makes remapping impossible
1628 	 */
1629 	if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
1630 	    intel_modifier_uses_dpt(dev_priv, modifier))
1631 		return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
1632 	else if (DISPLAY_VER(dev_priv) >= 7)
1633 		return 256 * 1024;
1634 	else
1635 		return 128 * 1024;
1636 }
1637 
1638 static u32
1639 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
1640 {
1641 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
1642 	u32 tile_width;
1643 
1644 	if (is_surface_linear(fb, color_plane)) {
1645 		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
1646 							   fb->format->format,
1647 							   fb->modifier);
1648 
1649 		/*
1650 		 * To make remapping with linear generally feasible
1651 		 * we need the stride to be page aligned.
1652 		 */
1653 		if (fb->pitches[color_plane] > max_stride &&
1654 		    !is_ccs_modifier(fb->modifier))
1655 			return intel_tile_size(dev_priv);
1656 		else
1657 			return 64;
1658 	}
1659 
1660 	tile_width = intel_tile_width_bytes(fb, color_plane);
1661 	if (is_ccs_modifier(fb->modifier)) {
1662 		/*
1663 		 * Display WA #0531: skl,bxt,kbl,glk
1664 		 *
1665 		 * Render decompression and plane width > 3840
1666 		 * combined with horizontal panning requires the
1667 		 * plane stride to be a multiple of 4. We'll just
1668 		 * require the entire fb to accommodate that to avoid
1669 		 * potential runtime errors at plane configuration time.
1670 		 */
1671 		if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
1672 		    color_plane == 0 && fb->width > 3840)
1673 			tile_width *= 4;
1674 		/*
1675 		 * The main surface pitch must be padded to a multiple of four
1676 		 * tile widths.
1677 		 */
1678 		else if (DISPLAY_VER(dev_priv) >= 12)
1679 			tile_width *= 4;
1680 	}
1681 	return tile_width;
1682 }
1683 
1684 static struct i915_vma *
1685 initial_plane_vma(struct drm_i915_private *i915,
1686 		  struct intel_initial_plane_config *plane_config)
1687 {
1688 	struct drm_i915_gem_object *obj;
1689 	struct i915_vma *vma;
1690 	u32 base, size;
1691 
1692 	if (plane_config->size == 0)
1693 		return NULL;
1694 
1695 	base = round_down(plane_config->base,
1696 			  I915_GTT_MIN_ALIGNMENT);
1697 	size = round_up(plane_config->base + plane_config->size,
1698 			I915_GTT_MIN_ALIGNMENT);
1699 	size -= base;
1700 
1701 	/*
1702 	 * If the FB is too big, just don't use it since fbdev is not very
1703 	 * important and we should probably use that space with FBC or other
1704 	 * features.
1705 	 */
1706 	if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
1707 	    size * 2 > i915->stolen_usable_size)
1708 		return NULL;
1709 
1710 	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
1711 	if (IS_ERR(obj))
1712 		return NULL;
1713 
1714 	/*
1715 	 * Mark it WT ahead of time to avoid changing the
1716 	 * cache_level during fbdev initialization. The
1717 	 * unbind there would get stuck waiting for rcu.
1718 	 */
1719 	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
1720 					    I915_CACHE_WT : I915_CACHE_NONE);
1721 
1722 	switch (plane_config->tiling) {
1723 	case I915_TILING_NONE:
1724 		break;
1725 	case I915_TILING_X:
1726 	case I915_TILING_Y:
1727 		obj->tiling_and_stride =
1728 			plane_config->fb->base.pitches[0] |
1729 			plane_config->tiling;
1730 		break;
1731 	default:
1732 		MISSING_CASE(plane_config->tiling);
1733 		goto err_obj;
1734 	}
1735 
1736 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1737 	if (IS_ERR(vma))
1738 		goto err_obj;
1739 
1740 	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
1741 		goto err_obj;
1742 
1743 	if (i915_gem_object_is_tiled(obj) &&
1744 	    !i915_vma_is_map_and_fenceable(vma))
1745 		goto err_obj;
1746 
1747 	return vma;
1748 
1749 err_obj:
1750 	i915_gem_object_put(obj);
1751 	return NULL;
1752 }
1753 
1754 static bool
1755 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
1756 			      struct intel_initial_plane_config *plane_config)
1757 {
1758 	struct drm_device *dev = crtc->base.dev;
1759 	struct drm_i915_private *dev_priv = to_i915(dev);
1760 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
1761 	struct drm_framebuffer *fb = &plane_config->fb->base;
1762 	struct i915_vma *vma;
1763 
1764 	switch (fb->modifier) {
1765 	case DRM_FORMAT_MOD_LINEAR:
1766 	case I915_FORMAT_MOD_X_TILED:
1767 	case I915_FORMAT_MOD_Y_TILED:
1768 		break;
1769 	default:
1770 		drm_dbg(&dev_priv->drm,
1771 			"Unsupported modifier for initial FB: 0x%llx\n",
1772 			fb->modifier);
1773 		return false;
1774 	}
1775 
1776 	vma = initial_plane_vma(dev_priv, plane_config);
1777 	if (!vma)
1778 		return false;
1779 
1780 	mode_cmd.pixel_format = fb->format->format;
1781 	mode_cmd.width = fb->width;
1782 	mode_cmd.height = fb->height;
1783 	mode_cmd.pitches[0] = fb->pitches[0];
1784 	mode_cmd.modifier[0] = fb->modifier;
1785 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
1786 
1787 	if (intel_framebuffer_init(to_intel_framebuffer(fb),
1788 				   vma->obj, &mode_cmd)) {
1789 		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
1790 		goto err_vma;
1791 	}
1792 
1793 	plane_config->vma = vma;
1794 	return true;
1795 
1796 err_vma:
1797 	i915_vma_put(vma);
1798 	return false;
1799 }
1800 
1801 static void
1802 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
1803 			struct intel_plane_state *plane_state,
1804 			bool visible)
1805 {
1806 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1807 
1808 	plane_state->uapi.visible = visible;
1809 
1810 	if (visible)
1811 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
1812 	else
1813 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
1814 }
1815 
1816 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
1817 {
1818 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1819 	struct drm_plane *plane;
1820 
1821 	/*
1822 	 * Active_planes aliases if multiple "primary" or cursor planes
1823 	 * have been used on the same (or wrong) pipe. plane_mask uses
1824 	 * unique ids, hence we can use that to reconstruct active_planes.
1825 	 */
1826 	crtc_state->enabled_planes = 0;
1827 	crtc_state->active_planes = 0;
1828 
1829 	drm_for_each_plane_mask(plane, &dev_priv->drm,
1830 				crtc_state->uapi.plane_mask) {
1831 		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
1832 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
1833 	}
1834 }
1835 
1836 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
1837 					 struct intel_plane *plane)
1838 {
1839 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1840 	struct intel_crtc_state *crtc_state =
1841 		to_intel_crtc_state(crtc->base.state);
1842 	struct intel_plane_state *plane_state =
1843 		to_intel_plane_state(plane->base.state);
1844 
1845 	drm_dbg_kms(&dev_priv->drm,
1846 		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
1847 		    plane->base.base.id, plane->base.name,
1848 		    crtc->base.base.id, crtc->base.name);
1849 
1850 	intel_set_plane_visible(crtc_state, plane_state, false);
1851 	fixup_plane_bitmasks(crtc_state);
1852 	crtc_state->data_rate[plane->id] = 0;
1853 	crtc_state->min_cdclk[plane->id] = 0;
1854 
1855 	if (plane->id == PLANE_PRIMARY)
1856 		hsw_disable_ips(crtc_state);
1857 
1858 	/*
1859 	 * Vblank time updates from the shadow to live plane control register
1860 	 * are blocked if the memory self-refresh mode is active at that
1861 	 * moment. So to make sure the plane gets truly disabled, disable
1862 	 * first the self-refresh mode. The self-refresh enable bit in turn
1863 	 * will be checked/applied by the HW only at the next frame start
1864 	 * event which is after the vblank start event, so we need to have a
1865 	 * wait-for-vblank between disabling the plane and the pipe.
1866 	 */
1867 	if (HAS_GMCH(dev_priv) &&
1868 	    intel_set_memory_cxsr(dev_priv, false))
1869 		intel_wait_for_vblank(dev_priv, crtc->pipe);
1870 
1871 	/*
1872 	 * Gen2 reports pipe underruns whenever all planes are disabled.
1873 	 * So disable underrun reporting before all the planes get disabled.
1874 	 */
1875 	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
1876 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
1877 
1878 	intel_disable_plane(plane, crtc_state);
1879 	intel_wait_for_vblank(dev_priv, crtc->pipe);
1880 }
1881 
1882 static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
1883 {
1884 	struct drm_i915_private *i915 = vm->i915;
1885 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
1886 	intel_wakeref_t wakeref;
1887 	struct i915_vma *vma;
1888 	void __iomem *iomem;
1889 
1890 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1891 	atomic_inc(&i915->gpu_error.pending_fb_pin);
1892 
1893 	vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096,
1894 				       HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
1895 	if (IS_ERR(vma))
1896 		goto err;
1897 
1898 	iomem = i915_vma_pin_iomap(vma);
1899 	i915_vma_unpin(vma);
1900 	if (IS_ERR(iomem)) {
1901 		vma = iomem;
1902 		goto err;
1903 	}
1904 
1905 	dpt->vma = vma;
1906 	dpt->iomem = iomem;
1907 
1908 	i915_vma_get(vma);
1909 
1910 err:
1911 	atomic_dec(&i915->gpu_error.pending_fb_pin);
1912 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1913 
1914 	return vma;
1915 }
1916 
1917 static void intel_dpt_unpin(struct i915_address_space *vm)
1918 {
1919 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
1920 
1921 	i915_vma_unpin_iomap(dpt->vma);
1922 	i915_vma_put(dpt->vma);
1923 }
1924 
1925 static bool
1926 intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
1927 			      const struct intel_initial_plane_config *plane_config,
1928 			      struct drm_framebuffer **fb,
1929 			      struct i915_vma **vma)
1930 {
1931 	struct intel_crtc *crtc;
1932 
1933 	for_each_intel_crtc(&i915->drm, crtc) {
1934 		struct intel_crtc_state *crtc_state =
1935 			to_intel_crtc_state(crtc->base.state);
1936 		struct intel_plane *plane =
1937 			to_intel_plane(crtc->base.primary);
1938 		struct intel_plane_state *plane_state =
1939 			to_intel_plane_state(plane->base.state);
1940 
1941 		if (!crtc_state->uapi.active)
1942 			continue;
1943 
1944 		if (!plane_state->ggtt_vma)
1945 			continue;
1946 
1947 		if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
1948 			*fb = plane_state->hw.fb;
1949 			*vma = plane_state->ggtt_vma;
1950 			return true;
1951 		}
1952 	}
1953 
1954 	return false;
1955 }
1956 
1957 static void
1958 intel_find_initial_plane_obj(struct intel_crtc *crtc,
1959 			     struct intel_initial_plane_config *plane_config)
1960 {
1961 	struct drm_device *dev = crtc->base.dev;
1962 	struct drm_i915_private *dev_priv = to_i915(dev);
1963 	struct intel_crtc_state *crtc_state =
1964 		to_intel_crtc_state(crtc->base.state);
1965 	struct intel_plane *plane =
1966 		to_intel_plane(crtc->base.primary);
1967 	struct intel_plane_state *plane_state =
1968 		to_intel_plane_state(plane->base.state);
1969 	struct drm_framebuffer *fb;
1970 	struct i915_vma *vma;
1971 
1972 	/*
1973 	 * TODO:
1974 	 *   Disable planes if get_initial_plane_config() failed.
1975 	 *   Make sure things work if the surface base is not page aligned.
1976 	 */
1977 	if (!plane_config->fb)
1978 		return;
1979 
1980 	if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
1981 		fb = &plane_config->fb->base;
1982 		vma = plane_config->vma;
1983 		goto valid_fb;
1984 	}
1985 
1986 	/*
1987 	 * Failed to alloc the obj, check to see if we should share
1988 	 * an fb with another CRTC instead
1989 	 */
1990 	if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
1991 		goto valid_fb;
1992 
1993 	/*
1994 	 * We've failed to reconstruct the BIOS FB.  Current display state
1995 	 * indicates that the primary plane is visible, but has a NULL FB,
1996 	 * which will lead to problems later if we don't fix it up.  The
1997 	 * simplest solution is to just disable the primary plane now and
1998 	 * pretend the BIOS never had it enabled.
1999 	 */
2000 	intel_plane_disable_noatomic(crtc, plane);
2001 	if (crtc_state->bigjoiner) {
2002 		struct intel_crtc *slave =
2003 			crtc_state->bigjoiner_linked_crtc;
2004 		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
2005 	}
2006 
2007 	return;
2008 
2009 valid_fb:
2010 	plane_state->uapi.rotation = plane_config->rotation;
2011 	intel_fb_fill_view(to_intel_framebuffer(fb),
2012 			   plane_state->uapi.rotation, &plane_state->view);
2013 
2014 	__i915_vma_pin(vma);
2015 	plane_state->ggtt_vma = i915_vma_get(vma);
2016 	if (intel_plane_uses_fence(plane_state) &&
2017 	    i915_vma_pin_fence(vma) == 0 && vma->fence)
2018 		plane_state->flags |= PLANE_HAS_FENCE;
2019 
2020 	plane_state->uapi.src_x = 0;
2021 	plane_state->uapi.src_y = 0;
2022 	plane_state->uapi.src_w = fb->width << 16;
2023 	plane_state->uapi.src_h = fb->height << 16;
2024 
2025 	plane_state->uapi.crtc_x = 0;
2026 	plane_state->uapi.crtc_y = 0;
2027 	plane_state->uapi.crtc_w = fb->width;
2028 	plane_state->uapi.crtc_h = fb->height;
2029 
2030 	if (plane_config->tiling)
2031 		dev_priv->preserve_bios_swizzle = true;
2032 
2033 	plane_state->uapi.fb = fb;
2034 	drm_framebuffer_get(fb);
2035 
2036 	plane_state->uapi.crtc = &crtc->base;
2037 	intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
2038 
2039 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
2040 
2041 	atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
2042 }
2043 
2044 unsigned int
2045 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
2046 {
2047 	int x = 0, y = 0;
2048 
2049 	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
2050 					  plane_state->view.color_plane[0].offset, 0);
2051 
2052 	return y;
2053 }
2054 
2055 static int
2056 __intel_display_resume(struct drm_device *dev,
2057 		       struct drm_atomic_state *state,
2058 		       struct drm_modeset_acquire_ctx *ctx)
2059 {
2060 	struct drm_crtc_state *crtc_state;
2061 	struct drm_crtc *crtc;
2062 	int i, ret;
2063 
2064 	intel_modeset_setup_hw_state(dev, ctx);
2065 	intel_vga_redisable(to_i915(dev));
2066 
2067 	if (!state)
2068 		return 0;
2069 
2070 	/*
2071 	 * We've duplicated the state, pointers to the old state are invalid.
2072 	 *
2073 	 * Don't attempt to use the old state until we commit the duplicated state.
2074 	 */
2075 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2076 		/*
2077 		 * Force recalculation even if we restore
2078 		 * current state. With fast modeset this may not result
2079 		 * in a modeset when the state is compatible.
2080 		 */
2081 		crtc_state->mode_changed = true;
2082 	}
2083 
2084 	/* ignore any reset values/BIOS leftovers in the WM registers */
2085 	if (!HAS_GMCH(to_i915(dev)))
2086 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
2087 
2088 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
2089 
2090 	drm_WARN_ON(dev, ret == -EDEADLK);
2091 	return ret;
2092 }
2093 
2094 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
2095 {
2096 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
2097 		intel_has_gpu_reset(&dev_priv->gt));
2098 }
2099 
2100 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
2101 {
2102 	struct drm_device *dev = &dev_priv->drm;
2103 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2104 	struct drm_atomic_state *state;
2105 	int ret;
2106 
2107 	if (!HAS_DISPLAY(dev_priv))
2108 		return;
2109 
2110 	/* reset doesn't touch the display */
2111 	if (!dev_priv->params.force_reset_modeset_test &&
2112 	    !gpu_reset_clobbers_display(dev_priv))
2113 		return;
2114 
2115 	/* We have a modeset vs reset deadlock, defensively unbreak it. */
2116 	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
2117 	smp_mb__after_atomic();
2118 	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
2119 
2120 	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
2121 		drm_dbg_kms(&dev_priv->drm,
2122 			    "Modeset potentially stuck, unbreaking through wedging\n");
2123 		intel_gt_set_wedged(&dev_priv->gt);
2124 	}
2125 
2126 	/*
2127 	 * Need mode_config.mutex so that we don't
2128 	 * trample ongoing ->detect() and whatnot.
2129 	 */
2130 	mutex_lock(&dev->mode_config.mutex);
2131 	drm_modeset_acquire_init(ctx, 0);
2132 	while (1) {
2133 		ret = drm_modeset_lock_all_ctx(dev, ctx);
2134 		if (ret != -EDEADLK)
2135 			break;
2136 
2137 		drm_modeset_backoff(ctx);
2138 	}
2139 	/*
2140 	 * Disabling the crtcs gracefully seems nicer. Also the
2141 	 * g33 docs say we should at least disable all the planes.
2142 	 */
2143 	state = drm_atomic_helper_duplicate_state(dev, ctx);
2144 	if (IS_ERR(state)) {
2145 		ret = PTR_ERR(state);
2146 		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
2147 			ret);
2148 		return;
2149 	}
2150 
2151 	ret = drm_atomic_helper_disable_all(dev, ctx);
2152 	if (ret) {
2153 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2154 			ret);
2155 		drm_atomic_state_put(state);
2156 		return;
2157 	}
2158 
2159 	dev_priv->modeset_restore_state = state;
2160 	state->acquire_ctx = ctx;
2161 }
2162 
2163 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
2164 {
2165 	struct drm_device *dev = &dev_priv->drm;
2166 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2167 	struct drm_atomic_state *state;
2168 	int ret;
2169 
2170 	if (!HAS_DISPLAY(dev_priv))
2171 		return;
2172 
2173 	/* reset doesn't touch the display */
2174 	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
2175 		return;
2176 
2177 	state = fetch_and_zero(&dev_priv->modeset_restore_state);
2178 	if (!state)
2179 		goto unlock;
2180 
2181 	/* reset doesn't touch the display */
2182 	if (!gpu_reset_clobbers_display(dev_priv)) {
2183 		/* for testing only restore the display */
2184 		ret = __intel_display_resume(dev, state, ctx);
2185 		if (ret)
2186 			drm_err(&dev_priv->drm,
2187 				"Restoring old state failed with %i\n", ret);
2188 	} else {
2189 		/*
2190 		 * The display has been reset as well,
2191 		 * so need a full re-initialization.
2192 		 */
2193 		intel_pps_unlock_regs_wa(dev_priv);
2194 		intel_modeset_init_hw(dev_priv);
2195 		intel_init_clock_gating(dev_priv);
2196 		intel_hpd_init(dev_priv);
2197 
2198 		ret = __intel_display_resume(dev, state, ctx);
2199 		if (ret)
2200 			drm_err(&dev_priv->drm,
2201 				"Restoring old state failed with %i\n", ret);
2202 
2203 		intel_hpd_poll_disable(dev_priv);
2204 	}
2205 
2206 	drm_atomic_state_put(state);
2207 unlock:
2208 	drm_modeset_drop_locks(ctx);
2209 	drm_modeset_acquire_fini(ctx);
2210 	mutex_unlock(&dev->mode_config.mutex);
2211 
2212 	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
2213 }
2214 
2215 static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
2216 {
2217 	if (crtc_state->pch_pfit.enabled &&
2218 	    (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
2219 	     crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
2220 	     crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
2221 		return false;
2222 
2223 	if (crtc_state->dsc.compression_enable)
2224 		return false;
2225 
2226 	if (crtc_state->has_psr2)
2227 		return false;
2228 
2229 	if (crtc_state->splitter.enable)
2230 		return false;
2231 
2232 	return true;
2233 }
2234 
2235 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
2236 {
2237 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2238 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2239 	enum pipe pipe = crtc->pipe;
2240 	u32 tmp;
2241 
2242 	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
2243 
2244 	/*
2245 	 * Display WA #1153: icl
2246 	 * enable hardware to bypass the alpha math
2247 	 * and rounding for per-pixel values 00 and 0xff
2248 	 */
2249 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
2250 	/*
2251 	 * Display WA # 1605353570: icl
2252 	 * Set the pixel rounding bit to 1 for allowing
2253 	 * passthrough of Frame buffer pixels unmodified
2254 	 * across pipe
2255 	 */
2256 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
2257 
2258 	if (IS_DG2(dev_priv)) {
2259 		/*
2260 		 * Underrun recovery must always be disabled on DG2.  However
2261 		 * the chicken bit meaning is inverted compared to other
2262 		 * platforms.
2263 		 */
2264 		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
2265 	} else if (DISPLAY_VER(dev_priv) >= 13) {
2266 		if (underrun_recovery_supported(crtc_state))
2267 			tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
2268 		else
2269 			tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
2270 	}
2271 
2272 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
2273 }
2274 
2275 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
2276 {
2277 	struct drm_crtc *crtc;
2278 	bool cleanup_done;
2279 
2280 	drm_for_each_crtc(crtc, &dev_priv->drm) {
2281 		struct drm_crtc_commit *commit;
2282 		spin_lock(&crtc->commit_lock);
2283 		commit = list_first_entry_or_null(&crtc->commit_list,
2284 						  struct drm_crtc_commit, commit_entry);
2285 		cleanup_done = commit ?
2286 			try_wait_for_completion(&commit->cleanup_done) : true;
2287 		spin_unlock(&crtc->commit_lock);
2288 
2289 		if (cleanup_done)
2290 			continue;
2291 
2292 		drm_crtc_wait_one_vblank(crtc);
2293 
2294 		return true;
2295 	}
2296 
2297 	return false;
2298 }
2299 
2300 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
2301 {
2302 	u32 temp;
2303 
2304 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
2305 
2306 	mutex_lock(&dev_priv->sb_lock);
2307 
2308 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2309 	temp |= SBI_SSCCTL_DISABLE;
2310 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2311 
2312 	mutex_unlock(&dev_priv->sb_lock);
2313 }
2314 
2315 /* Program iCLKIP clock to the desired frequency */
2316 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
2317 {
2318 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2319 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2320 	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
2321 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
2322 	u32 temp;
2323 
2324 	lpt_disable_iclkip(dev_priv);
2325 
2326 	/* The iCLK virtual clock root frequency is in MHz,
2327 	 * but the adjusted_mode->crtc_clock in in KHz. To get the
2328 	 * divisors, it is necessary to divide one by another, so we
2329 	 * convert the virtual clock precision to KHz here for higher
2330 	 * precision.
2331 	 */
2332 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
2333 		u32 iclk_virtual_root_freq = 172800 * 1000;
2334 		u32 iclk_pi_range = 64;
2335 		u32 desired_divisor;
2336 
2337 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2338 						    clock << auxdiv);
2339 		divsel = (desired_divisor / iclk_pi_range) - 2;
2340 		phaseinc = desired_divisor % iclk_pi_range;
2341 
2342 		/*
2343 		 * Near 20MHz is a corner case which is
2344 		 * out of range for the 7-bit divisor
2345 		 */
2346 		if (divsel <= 0x7f)
2347 			break;
2348 	}
2349 
2350 	/* This should not happen with any sane values */
2351 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2352 		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2353 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
2354 		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2355 
2356 	drm_dbg_kms(&dev_priv->drm,
2357 		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2358 		    clock, auxdiv, divsel, phasedir, phaseinc);
2359 
2360 	mutex_lock(&dev_priv->sb_lock);
2361 
2362 	/* Program SSCDIVINTPHASE6 */
2363 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2364 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2365 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2366 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2367 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2368 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2369 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2370 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2371 
2372 	/* Program SSCAUXDIV */
2373 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2374 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2375 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2376 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
2377 
2378 	/* Enable modulator and associated divider */
2379 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2380 	temp &= ~SBI_SSCCTL_DISABLE;
2381 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2382 
2383 	mutex_unlock(&dev_priv->sb_lock);
2384 
2385 	/* Wait for initialization time */
2386 	udelay(24);
2387 
2388 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2389 }
2390 
2391 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
2392 {
2393 	u32 divsel, phaseinc, auxdiv;
2394 	u32 iclk_virtual_root_freq = 172800 * 1000;
2395 	u32 iclk_pi_range = 64;
2396 	u32 desired_divisor;
2397 	u32 temp;
2398 
2399 	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
2400 		return 0;
2401 
2402 	mutex_lock(&dev_priv->sb_lock);
2403 
2404 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2405 	if (temp & SBI_SSCCTL_DISABLE) {
2406 		mutex_unlock(&dev_priv->sb_lock);
2407 		return 0;
2408 	}
2409 
2410 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2411 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
2412 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
2413 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
2414 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
2415 
2416 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2417 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
2418 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
2419 
2420 	mutex_unlock(&dev_priv->sb_lock);
2421 
2422 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
2423 
2424 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2425 				 desired_divisor << auxdiv);
2426 }
2427 
2428 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
2429 					   enum pipe pch_transcoder)
2430 {
2431 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2432 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2433 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2434 
2435 	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
2436 		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
2437 	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
2438 		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
2439 	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
2440 		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
2441 
2442 	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
2443 		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2444 	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
2445 		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
2446 	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
2447 		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
2448 	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2449 		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
2450 }
2451 
2452 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
2453 {
2454 	u32 temp;
2455 
2456 	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
2457 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
2458 		return;
2459 
2460 	drm_WARN_ON(&dev_priv->drm,
2461 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
2462 		    FDI_RX_ENABLE);
2463 	drm_WARN_ON(&dev_priv->drm,
2464 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
2465 		    FDI_RX_ENABLE);
2466 
2467 	temp &= ~FDI_BC_BIFURCATION_SELECT;
2468 	if (enable)
2469 		temp |= FDI_BC_BIFURCATION_SELECT;
2470 
2471 	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
2472 		    enable ? "en" : "dis");
2473 	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
2474 	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
2475 }
2476 
2477 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2478 {
2479 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2480 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2481 
2482 	switch (crtc->pipe) {
2483 	case PIPE_A:
2484 		break;
2485 	case PIPE_B:
2486 		if (crtc_state->fdi_lanes > 2)
2487 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
2488 		else
2489 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
2490 
2491 		break;
2492 	case PIPE_C:
2493 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
2494 
2495 		break;
2496 	default:
2497 		BUG();
2498 	}
2499 }
2500 
2501 /*
2502  * Finds the encoder associated with the given CRTC. This can only be
2503  * used when we know that the CRTC isn't feeding multiple encoders!
2504  */
2505 struct intel_encoder *
2506 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2507 			   const struct intel_crtc_state *crtc_state)
2508 {
2509 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2510 	const struct drm_connector_state *connector_state;
2511 	const struct drm_connector *connector;
2512 	struct intel_encoder *encoder = NULL;
2513 	int num_encoders = 0;
2514 	int i;
2515 
2516 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2517 		if (connector_state->crtc != &crtc->base)
2518 			continue;
2519 
2520 		encoder = to_intel_encoder(connector_state->best_encoder);
2521 		num_encoders++;
2522 	}
2523 
2524 	drm_WARN(encoder->base.dev, num_encoders != 1,
2525 		 "%d encoders for pipe %c\n",
2526 		 num_encoders, pipe_name(crtc->pipe));
2527 
2528 	return encoder;
2529 }
2530 
2531 /*
2532  * Enable PCH resources required for PCH ports:
2533  *   - PCH PLLs
2534  *   - FDI training & RX/TX
2535  *   - update transcoder timings
2536  *   - DP transcoding bits
2537  *   - transcoder
2538  */
2539 static void ilk_pch_enable(const struct intel_atomic_state *state,
2540 			   const struct intel_crtc_state *crtc_state)
2541 {
2542 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2543 	struct drm_device *dev = crtc->base.dev;
2544 	struct drm_i915_private *dev_priv = to_i915(dev);
2545 	enum pipe pipe = crtc->pipe;
2546 	u32 temp;
2547 
2548 	assert_pch_transcoder_disabled(dev_priv, pipe);
2549 
2550 	if (IS_IVYBRIDGE(dev_priv))
2551 		ivb_update_fdi_bc_bifurcation(crtc_state);
2552 
2553 	/* Write the TU size bits before fdi link training, so that error
2554 	 * detection works. */
2555 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2556 		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2557 
2558 	/* For PCH output, training FDI link */
2559 	dev_priv->display.fdi_link_train(crtc, crtc_state);
2560 
2561 	/* We need to program the right clock selection before writing the pixel
2562 	 * mutliplier into the DPLL. */
2563 	if (HAS_PCH_CPT(dev_priv)) {
2564 		u32 sel;
2565 
2566 		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
2567 		temp |= TRANS_DPLL_ENABLE(pipe);
2568 		sel = TRANS_DPLLB_SEL(pipe);
2569 		if (crtc_state->shared_dpll ==
2570 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
2571 			temp |= sel;
2572 		else
2573 			temp &= ~sel;
2574 		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
2575 	}
2576 
2577 	/* XXX: pch pll's can be enabled any time before we enable the PCH
2578 	 * transcoder, and we actually should do this to not upset any PCH
2579 	 * transcoder that already use the clock when we share it.
2580 	 *
2581 	 * Note that enable_shared_dpll tries to do the right thing, but
2582 	 * get_shared_dpll unconditionally resets the pll - we need that to have
2583 	 * the right LVDS enable sequence. */
2584 	intel_enable_shared_dpll(crtc_state);
2585 
2586 	/* set transcoder timing, panel must allow it */
2587 	assert_panel_unlocked(dev_priv, pipe);
2588 	ilk_pch_transcoder_set_timings(crtc_state, pipe);
2589 
2590 	intel_fdi_normal_train(crtc);
2591 
2592 	/* For PCH DP, enable TRANS_DP_CTL */
2593 	if (HAS_PCH_CPT(dev_priv) &&
2594 	    intel_crtc_has_dp_encoder(crtc_state)) {
2595 		const struct drm_display_mode *adjusted_mode =
2596 			&crtc_state->hw.adjusted_mode;
2597 		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2598 		i915_reg_t reg = TRANS_DP_CTL(pipe);
2599 		enum port port;
2600 
2601 		temp = intel_de_read(dev_priv, reg);
2602 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
2603 			  TRANS_DP_SYNC_MASK |
2604 			  TRANS_DP_BPC_MASK);
2605 		temp |= TRANS_DP_OUTPUT_ENABLE;
2606 		temp |= bpc << 9; /* same format but at 11:9 */
2607 
2608 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2609 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2610 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2611 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2612 
2613 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
2614 		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
2615 		temp |= TRANS_DP_PORT_SEL(port);
2616 
2617 		intel_de_write(dev_priv, reg, temp);
2618 	}
2619 
2620 	ilk_enable_pch_transcoder(crtc_state);
2621 }
2622 
2623 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
2624 {
2625 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2626 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2627 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2628 
2629 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
2630 
2631 	lpt_program_iclkip(crtc_state);
2632 
2633 	/* Set transcoder timing. */
2634 	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
2635 
2636 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
2637 }
2638 
2639 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
2640 			       enum pipe pipe)
2641 {
2642 	i915_reg_t dslreg = PIPEDSL(pipe);
2643 	u32 temp;
2644 
2645 	temp = intel_de_read(dev_priv, dslreg);
2646 	udelay(500);
2647 	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
2648 		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
2649 			drm_err(&dev_priv->drm,
2650 				"mode set failed: pipe %c stuck\n",
2651 				pipe_name(pipe));
2652 	}
2653 }
2654 
2655 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
2656 {
2657 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2658 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2659 	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
2660 	enum pipe pipe = crtc->pipe;
2661 	int width = drm_rect_width(dst);
2662 	int height = drm_rect_height(dst);
2663 	int x = dst->x1;
2664 	int y = dst->y1;
2665 
2666 	if (!crtc_state->pch_pfit.enabled)
2667 		return;
2668 
2669 	/* Force use of hard-coded filter coefficients
2670 	 * as some pre-programmed values are broken,
2671 	 * e.g. x201.
2672 	 */
2673 	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
2674 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2675 			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
2676 	else
2677 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2678 			       PF_FILTER_MED_3x3);
2679 	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
2680 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
2681 }
2682 
2683 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
2684 {
2685 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2686 	struct drm_device *dev = crtc->base.dev;
2687 	struct drm_i915_private *dev_priv = to_i915(dev);
2688 
2689 	if (!crtc_state->ips_enabled)
2690 		return;
2691 
2692 	/*
2693 	 * We can only enable IPS after we enable a plane and wait for a vblank
2694 	 * This function is called from post_plane_update, which is run after
2695 	 * a vblank wait.
2696 	 */
2697 	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
2698 
2699 	if (IS_BROADWELL(dev_priv)) {
2700 		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
2701 							 IPS_ENABLE | IPS_PCODE_CONTROL));
2702 		/* Quoting Art Runyan: "its not safe to expect any particular
2703 		 * value in IPS_CTL bit 31 after enabling IPS through the
2704 		 * mailbox." Moreover, the mailbox may return a bogus state,
2705 		 * so we need to just enable it and continue on.
2706 		 */
2707 	} else {
2708 		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
2709 		/* The bit only becomes 1 in the next vblank, so this wait here
2710 		 * is essentially intel_wait_for_vblank. If we don't have this
2711 		 * and don't wait for vblanks until the end of crtc_enable, then
2712 		 * the HW state readout code will complain that the expected
2713 		 * IPS_CTL value is not the one we read. */
2714 		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
2715 			drm_err(&dev_priv->drm,
2716 				"Timed out waiting for IPS enable\n");
2717 	}
2718 }
2719 
2720 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
2721 {
2722 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2723 	struct drm_device *dev = crtc->base.dev;
2724 	struct drm_i915_private *dev_priv = to_i915(dev);
2725 
2726 	if (!crtc_state->ips_enabled)
2727 		return;
2728 
2729 	if (IS_BROADWELL(dev_priv)) {
2730 		drm_WARN_ON(dev,
2731 			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
2732 		/*
2733 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
2734 		 * 42ms timeout value leads to occasional timeouts so use 100ms
2735 		 * instead.
2736 		 */
2737 		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
2738 			drm_err(&dev_priv->drm,
2739 				"Timed out waiting for IPS disable\n");
2740 	} else {
2741 		intel_de_write(dev_priv, IPS_CTL, 0);
2742 		intel_de_posting_read(dev_priv, IPS_CTL);
2743 	}
2744 
2745 	/* We need to wait for a vblank before we can disable the plane. */
2746 	intel_wait_for_vblank(dev_priv, crtc->pipe);
2747 }
2748 
2749 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
2750 {
2751 	if (crtc->overlay)
2752 		(void) intel_overlay_switch_off(crtc->overlay);
2753 
2754 	/* Let userspace switch the overlay on again. In most cases userspace
2755 	 * has to recompute where to put it anyway.
2756 	 */
2757 }
2758 
2759 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
2760 				       const struct intel_crtc_state *new_crtc_state)
2761 {
2762 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2763 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2764 
2765 	if (!old_crtc_state->ips_enabled)
2766 		return false;
2767 
2768 	if (intel_crtc_needs_modeset(new_crtc_state))
2769 		return true;
2770 
2771 	/*
2772 	 * Workaround : Do not read or write the pipe palette/gamma data while
2773 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2774 	 *
2775 	 * Disable IPS before we program the LUT.
2776 	 */
2777 	if (IS_HASWELL(dev_priv) &&
2778 	    (new_crtc_state->uapi.color_mgmt_changed ||
2779 	     new_crtc_state->update_pipe) &&
2780 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2781 		return true;
2782 
2783 	return !new_crtc_state->ips_enabled;
2784 }
2785 
2786 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
2787 				       const struct intel_crtc_state *new_crtc_state)
2788 {
2789 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2790 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2791 
2792 	if (!new_crtc_state->ips_enabled)
2793 		return false;
2794 
2795 	if (intel_crtc_needs_modeset(new_crtc_state))
2796 		return true;
2797 
2798 	/*
2799 	 * Workaround : Do not read or write the pipe palette/gamma data while
2800 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2801 	 *
2802 	 * Re-enable IPS after the LUT has been programmed.
2803 	 */
2804 	if (IS_HASWELL(dev_priv) &&
2805 	    (new_crtc_state->uapi.color_mgmt_changed ||
2806 	     new_crtc_state->update_pipe) &&
2807 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2808 		return true;
2809 
2810 	/*
2811 	 * We can't read out IPS on broadwell, assume the worst and
2812 	 * forcibly enable IPS on the first fastset.
2813 	 */
2814 	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
2815 		return true;
2816 
2817 	return !old_crtc_state->ips_enabled;
2818 }
2819 
2820 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
2821 {
2822 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2823 
2824 	if (!crtc_state->nv12_planes)
2825 		return false;
2826 
2827 	/* WA Display #0827: Gen9:all */
2828 	if (DISPLAY_VER(dev_priv) == 9)
2829 		return true;
2830 
2831 	return false;
2832 }
2833 
2834 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
2835 {
2836 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2837 
2838 	/* Wa_2006604312:icl,ehl */
2839 	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
2840 		return true;
2841 
2842 	return false;
2843 }
2844 
2845 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
2846 			    const struct intel_crtc_state *new_crtc_state)
2847 {
2848 	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
2849 		new_crtc_state->active_planes;
2850 }
2851 
2852 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
2853 			     const struct intel_crtc_state *new_crtc_state)
2854 {
2855 	return old_crtc_state->active_planes &&
2856 		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
2857 }
2858 
2859 static void intel_post_plane_update(struct intel_atomic_state *state,
2860 				    struct intel_crtc *crtc)
2861 {
2862 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2863 	const struct intel_crtc_state *old_crtc_state =
2864 		intel_atomic_get_old_crtc_state(state, crtc);
2865 	const struct intel_crtc_state *new_crtc_state =
2866 		intel_atomic_get_new_crtc_state(state, crtc);
2867 	enum pipe pipe = crtc->pipe;
2868 
2869 	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
2870 
2871 	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
2872 		intel_update_watermarks(crtc);
2873 
2874 	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
2875 		hsw_enable_ips(new_crtc_state);
2876 
2877 	intel_fbc_post_update(state, crtc);
2878 
2879 	if (needs_nv12_wa(old_crtc_state) &&
2880 	    !needs_nv12_wa(new_crtc_state))
2881 		skl_wa_827(dev_priv, pipe, false);
2882 
2883 	if (needs_scalerclk_wa(old_crtc_state) &&
2884 	    !needs_scalerclk_wa(new_crtc_state))
2885 		icl_wa_scalerclkgating(dev_priv, pipe, false);
2886 }
2887 
2888 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
2889 					struct intel_crtc *crtc)
2890 {
2891 	const struct intel_crtc_state *crtc_state =
2892 		intel_atomic_get_new_crtc_state(state, crtc);
2893 	u8 update_planes = crtc_state->update_planes;
2894 	const struct intel_plane_state *plane_state;
2895 	struct intel_plane *plane;
2896 	int i;
2897 
2898 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2899 		if (plane->enable_flip_done &&
2900 		    plane->pipe == crtc->pipe &&
2901 		    update_planes & BIT(plane->id))
2902 			plane->enable_flip_done(plane);
2903 	}
2904 }
2905 
2906 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
2907 					 struct intel_crtc *crtc)
2908 {
2909 	const struct intel_crtc_state *crtc_state =
2910 		intel_atomic_get_new_crtc_state(state, crtc);
2911 	u8 update_planes = crtc_state->update_planes;
2912 	const struct intel_plane_state *plane_state;
2913 	struct intel_plane *plane;
2914 	int i;
2915 
2916 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2917 		if (plane->disable_flip_done &&
2918 		    plane->pipe == crtc->pipe &&
2919 		    update_planes & BIT(plane->id))
2920 			plane->disable_flip_done(plane);
2921 	}
2922 }
2923 
2924 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
2925 					     struct intel_crtc *crtc)
2926 {
2927 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2928 	const struct intel_crtc_state *old_crtc_state =
2929 		intel_atomic_get_old_crtc_state(state, crtc);
2930 	const struct intel_crtc_state *new_crtc_state =
2931 		intel_atomic_get_new_crtc_state(state, crtc);
2932 	u8 update_planes = new_crtc_state->update_planes;
2933 	const struct intel_plane_state *old_plane_state;
2934 	struct intel_plane *plane;
2935 	bool need_vbl_wait = false;
2936 	int i;
2937 
2938 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2939 		if (plane->need_async_flip_disable_wa &&
2940 		    plane->pipe == crtc->pipe &&
2941 		    update_planes & BIT(plane->id)) {
2942 			/*
2943 			 * Apart from the async flip bit we want to
2944 			 * preserve the old state for the plane.
2945 			 */
2946 			plane->async_flip(plane, old_crtc_state,
2947 					  old_plane_state, false);
2948 			need_vbl_wait = true;
2949 		}
2950 	}
2951 
2952 	if (need_vbl_wait)
2953 		intel_wait_for_vblank(i915, crtc->pipe);
2954 }
2955 
2956 static void intel_pre_plane_update(struct intel_atomic_state *state,
2957 				   struct intel_crtc *crtc)
2958 {
2959 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2960 	const struct intel_crtc_state *old_crtc_state =
2961 		intel_atomic_get_old_crtc_state(state, crtc);
2962 	const struct intel_crtc_state *new_crtc_state =
2963 		intel_atomic_get_new_crtc_state(state, crtc);
2964 	enum pipe pipe = crtc->pipe;
2965 
2966 	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
2967 		hsw_disable_ips(old_crtc_state);
2968 
2969 	if (intel_fbc_pre_update(state, crtc))
2970 		intel_wait_for_vblank(dev_priv, pipe);
2971 
2972 	/* Display WA 827 */
2973 	if (!needs_nv12_wa(old_crtc_state) &&
2974 	    needs_nv12_wa(new_crtc_state))
2975 		skl_wa_827(dev_priv, pipe, true);
2976 
2977 	/* Wa_2006604312:icl,ehl */
2978 	if (!needs_scalerclk_wa(old_crtc_state) &&
2979 	    needs_scalerclk_wa(new_crtc_state))
2980 		icl_wa_scalerclkgating(dev_priv, pipe, true);
2981 
2982 	/*
2983 	 * Vblank time updates from the shadow to live plane control register
2984 	 * are blocked if the memory self-refresh mode is active at that
2985 	 * moment. So to make sure the plane gets truly disabled, disable
2986 	 * first the self-refresh mode. The self-refresh enable bit in turn
2987 	 * will be checked/applied by the HW only at the next frame start
2988 	 * event which is after the vblank start event, so we need to have a
2989 	 * wait-for-vblank between disabling the plane and the pipe.
2990 	 */
2991 	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
2992 	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
2993 		intel_wait_for_vblank(dev_priv, pipe);
2994 
2995 	/*
2996 	 * IVB workaround: must disable low power watermarks for at least
2997 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
2998 	 * when scaling is disabled.
2999 	 *
3000 	 * WaCxSRDisabledForSpriteScaling:ivb
3001 	 */
3002 	if (old_crtc_state->hw.active &&
3003 	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
3004 		intel_wait_for_vblank(dev_priv, pipe);
3005 
3006 	/*
3007 	 * If we're doing a modeset we don't need to do any
3008 	 * pre-vblank watermark programming here.
3009 	 */
3010 	if (!intel_crtc_needs_modeset(new_crtc_state)) {
3011 		/*
3012 		 * For platforms that support atomic watermarks, program the
3013 		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
3014 		 * will be the intermediate values that are safe for both pre- and
3015 		 * post- vblank; when vblank happens, the 'active' values will be set
3016 		 * to the final 'target' values and we'll do this again to get the
3017 		 * optimal watermarks.  For gen9+ platforms, the values we program here
3018 		 * will be the final target values which will get automatically latched
3019 		 * at vblank time; no further programming will be necessary.
3020 		 *
3021 		 * If a platform hasn't been transitioned to atomic watermarks yet,
3022 		 * we'll continue to update watermarks the old way, if flags tell
3023 		 * us to.
3024 		 */
3025 		if (dev_priv->display.initial_watermarks)
3026 			dev_priv->display.initial_watermarks(state, crtc);
3027 		else if (new_crtc_state->update_wm_pre)
3028 			intel_update_watermarks(crtc);
3029 	}
3030 
3031 	/*
3032 	 * Gen2 reports pipe underruns whenever all planes are disabled.
3033 	 * So disable underrun reporting before all the planes get disabled.
3034 	 *
3035 	 * We do this after .initial_watermarks() so that we have a
3036 	 * chance of catching underruns with the intermediate watermarks
3037 	 * vs. the old plane configuration.
3038 	 */
3039 	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
3040 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3041 
3042 	/*
3043 	 * WA for platforms where async address update enable bit
3044 	 * is double buffered and only latched at start of vblank.
3045 	 */
3046 	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
3047 		intel_crtc_async_flip_disable_wa(state, crtc);
3048 }
3049 
3050 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
3051 				      struct intel_crtc *crtc)
3052 {
3053 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3054 	const struct intel_crtc_state *new_crtc_state =
3055 		intel_atomic_get_new_crtc_state(state, crtc);
3056 	unsigned int update_mask = new_crtc_state->update_planes;
3057 	const struct intel_plane_state *old_plane_state;
3058 	struct intel_plane *plane;
3059 	unsigned fb_bits = 0;
3060 	int i;
3061 
3062 	intel_crtc_dpms_overlay_disable(crtc);
3063 
3064 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
3065 		if (crtc->pipe != plane->pipe ||
3066 		    !(update_mask & BIT(plane->id)))
3067 			continue;
3068 
3069 		intel_disable_plane(plane, new_crtc_state);
3070 
3071 		if (old_plane_state->uapi.visible)
3072 			fb_bits |= plane->frontbuffer_bit;
3073 	}
3074 
3075 	intel_frontbuffer_flip(dev_priv, fb_bits);
3076 }
3077 
3078 /*
3079  * intel_connector_primary_encoder - get the primary encoder for a connector
3080  * @connector: connector for which to return the encoder
3081  *
3082  * Returns the primary encoder for a connector. There is a 1:1 mapping from
3083  * all connectors to their encoder, except for DP-MST connectors which have
3084  * both a virtual and a primary encoder. These DP-MST primary encoders can be
3085  * pointed to by as many DP-MST connectors as there are pipes.
3086  */
3087 static struct intel_encoder *
3088 intel_connector_primary_encoder(struct intel_connector *connector)
3089 {
3090 	struct intel_encoder *encoder;
3091 
3092 	if (connector->mst_port)
3093 		return &dp_to_dig_port(connector->mst_port)->base;
3094 
3095 	encoder = intel_attached_encoder(connector);
3096 	drm_WARN_ON(connector->base.dev, !encoder);
3097 
3098 	return encoder;
3099 }
3100 
3101 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
3102 {
3103 	struct drm_connector_state *new_conn_state;
3104 	struct drm_connector *connector;
3105 	int i;
3106 
3107 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3108 					i) {
3109 		struct intel_connector *intel_connector;
3110 		struct intel_encoder *encoder;
3111 		struct intel_crtc *crtc;
3112 
3113 		if (!intel_connector_needs_modeset(state, connector))
3114 			continue;
3115 
3116 		intel_connector = to_intel_connector(connector);
3117 		encoder = intel_connector_primary_encoder(intel_connector);
3118 		if (!encoder->update_prepare)
3119 			continue;
3120 
3121 		crtc = new_conn_state->crtc ?
3122 			to_intel_crtc(new_conn_state->crtc) : NULL;
3123 		encoder->update_prepare(state, encoder, crtc);
3124 	}
3125 }
3126 
3127 static void intel_encoders_update_complete(struct intel_atomic_state *state)
3128 {
3129 	struct drm_connector_state *new_conn_state;
3130 	struct drm_connector *connector;
3131 	int i;
3132 
3133 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3134 					i) {
3135 		struct intel_connector *intel_connector;
3136 		struct intel_encoder *encoder;
3137 		struct intel_crtc *crtc;
3138 
3139 		if (!intel_connector_needs_modeset(state, connector))
3140 			continue;
3141 
3142 		intel_connector = to_intel_connector(connector);
3143 		encoder = intel_connector_primary_encoder(intel_connector);
3144 		if (!encoder->update_complete)
3145 			continue;
3146 
3147 		crtc = new_conn_state->crtc ?
3148 			to_intel_crtc(new_conn_state->crtc) : NULL;
3149 		encoder->update_complete(state, encoder, crtc);
3150 	}
3151 }
3152 
3153 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
3154 					  struct intel_crtc *crtc)
3155 {
3156 	const struct intel_crtc_state *crtc_state =
3157 		intel_atomic_get_new_crtc_state(state, crtc);
3158 	const struct drm_connector_state *conn_state;
3159 	struct drm_connector *conn;
3160 	int i;
3161 
3162 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3163 		struct intel_encoder *encoder =
3164 			to_intel_encoder(conn_state->best_encoder);
3165 
3166 		if (conn_state->crtc != &crtc->base)
3167 			continue;
3168 
3169 		if (encoder->pre_pll_enable)
3170 			encoder->pre_pll_enable(state, encoder,
3171 						crtc_state, conn_state);
3172 	}
3173 }
3174 
3175 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
3176 				      struct intel_crtc *crtc)
3177 {
3178 	const struct intel_crtc_state *crtc_state =
3179 		intel_atomic_get_new_crtc_state(state, crtc);
3180 	const struct drm_connector_state *conn_state;
3181 	struct drm_connector *conn;
3182 	int i;
3183 
3184 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3185 		struct intel_encoder *encoder =
3186 			to_intel_encoder(conn_state->best_encoder);
3187 
3188 		if (conn_state->crtc != &crtc->base)
3189 			continue;
3190 
3191 		if (encoder->pre_enable)
3192 			encoder->pre_enable(state, encoder,
3193 					    crtc_state, conn_state);
3194 	}
3195 }
3196 
3197 static void intel_encoders_enable(struct intel_atomic_state *state,
3198 				  struct intel_crtc *crtc)
3199 {
3200 	const struct intel_crtc_state *crtc_state =
3201 		intel_atomic_get_new_crtc_state(state, crtc);
3202 	const struct drm_connector_state *conn_state;
3203 	struct drm_connector *conn;
3204 	int i;
3205 
3206 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3207 		struct intel_encoder *encoder =
3208 			to_intel_encoder(conn_state->best_encoder);
3209 
3210 		if (conn_state->crtc != &crtc->base)
3211 			continue;
3212 
3213 		if (encoder->enable)
3214 			encoder->enable(state, encoder,
3215 					crtc_state, conn_state);
3216 		intel_opregion_notify_encoder(encoder, true);
3217 	}
3218 }
3219 
3220 static void intel_encoders_pre_disable(struct intel_atomic_state *state,
3221 				       struct intel_crtc *crtc)
3222 {
3223 	const struct intel_crtc_state *old_crtc_state =
3224 		intel_atomic_get_old_crtc_state(state, crtc);
3225 	const struct drm_connector_state *old_conn_state;
3226 	struct drm_connector *conn;
3227 	int i;
3228 
3229 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3230 		struct intel_encoder *encoder =
3231 			to_intel_encoder(old_conn_state->best_encoder);
3232 
3233 		if (old_conn_state->crtc != &crtc->base)
3234 			continue;
3235 
3236 		if (encoder->pre_disable)
3237 			encoder->pre_disable(state, encoder, old_crtc_state,
3238 					     old_conn_state);
3239 	}
3240 }
3241 
3242 static void intel_encoders_disable(struct intel_atomic_state *state,
3243 				   struct intel_crtc *crtc)
3244 {
3245 	const struct intel_crtc_state *old_crtc_state =
3246 		intel_atomic_get_old_crtc_state(state, crtc);
3247 	const struct drm_connector_state *old_conn_state;
3248 	struct drm_connector *conn;
3249 	int i;
3250 
3251 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3252 		struct intel_encoder *encoder =
3253 			to_intel_encoder(old_conn_state->best_encoder);
3254 
3255 		if (old_conn_state->crtc != &crtc->base)
3256 			continue;
3257 
3258 		intel_opregion_notify_encoder(encoder, false);
3259 		if (encoder->disable)
3260 			encoder->disable(state, encoder,
3261 					 old_crtc_state, old_conn_state);
3262 	}
3263 }
3264 
3265 static void intel_encoders_post_disable(struct intel_atomic_state *state,
3266 					struct intel_crtc *crtc)
3267 {
3268 	const struct intel_crtc_state *old_crtc_state =
3269 		intel_atomic_get_old_crtc_state(state, crtc);
3270 	const struct drm_connector_state *old_conn_state;
3271 	struct drm_connector *conn;
3272 	int i;
3273 
3274 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3275 		struct intel_encoder *encoder =
3276 			to_intel_encoder(old_conn_state->best_encoder);
3277 
3278 		if (old_conn_state->crtc != &crtc->base)
3279 			continue;
3280 
3281 		if (encoder->post_disable)
3282 			encoder->post_disable(state, encoder,
3283 					      old_crtc_state, old_conn_state);
3284 	}
3285 }
3286 
3287 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
3288 					    struct intel_crtc *crtc)
3289 {
3290 	const struct intel_crtc_state *old_crtc_state =
3291 		intel_atomic_get_old_crtc_state(state, crtc);
3292 	const struct drm_connector_state *old_conn_state;
3293 	struct drm_connector *conn;
3294 	int i;
3295 
3296 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3297 		struct intel_encoder *encoder =
3298 			to_intel_encoder(old_conn_state->best_encoder);
3299 
3300 		if (old_conn_state->crtc != &crtc->base)
3301 			continue;
3302 
3303 		if (encoder->post_pll_disable)
3304 			encoder->post_pll_disable(state, encoder,
3305 						  old_crtc_state, old_conn_state);
3306 	}
3307 }
3308 
3309 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
3310 				       struct intel_crtc *crtc)
3311 {
3312 	const struct intel_crtc_state *crtc_state =
3313 		intel_atomic_get_new_crtc_state(state, crtc);
3314 	const struct drm_connector_state *conn_state;
3315 	struct drm_connector *conn;
3316 	int i;
3317 
3318 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3319 		struct intel_encoder *encoder =
3320 			to_intel_encoder(conn_state->best_encoder);
3321 
3322 		if (conn_state->crtc != &crtc->base)
3323 			continue;
3324 
3325 		if (encoder->update_pipe)
3326 			encoder->update_pipe(state, encoder,
3327 					     crtc_state, conn_state);
3328 	}
3329 }
3330 
3331 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
3332 {
3333 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3334 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3335 
3336 	plane->disable_plane(plane, crtc_state);
3337 }
3338 
3339 static void ilk_crtc_enable(struct intel_atomic_state *state,
3340 			    struct intel_crtc *crtc)
3341 {
3342 	const struct intel_crtc_state *new_crtc_state =
3343 		intel_atomic_get_new_crtc_state(state, crtc);
3344 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3345 	enum pipe pipe = crtc->pipe;
3346 
3347 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3348 		return;
3349 
3350 	/*
3351 	 * Sometimes spurious CPU pipe underruns happen during FDI
3352 	 * training, at least with VGA+HDMI cloning. Suppress them.
3353 	 *
3354 	 * On ILK we get an occasional spurious CPU pipe underruns
3355 	 * between eDP port A enable and vdd enable. Also PCH port
3356 	 * enable seems to result in the occasional CPU pipe underrun.
3357 	 *
3358 	 * Spurious PCH underruns also occur during PCH enabling.
3359 	 */
3360 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3361 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3362 
3363 	if (new_crtc_state->has_pch_encoder)
3364 		intel_prepare_shared_dpll(new_crtc_state);
3365 
3366 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3367 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3368 
3369 	intel_set_transcoder_timings(new_crtc_state);
3370 	intel_set_pipe_src_size(new_crtc_state);
3371 
3372 	if (new_crtc_state->has_pch_encoder)
3373 		intel_cpu_transcoder_set_m_n(new_crtc_state,
3374 					     &new_crtc_state->fdi_m_n, NULL);
3375 
3376 	ilk_set_pipeconf(new_crtc_state);
3377 
3378 	crtc->active = true;
3379 
3380 	intel_encoders_pre_enable(state, crtc);
3381 
3382 	if (new_crtc_state->has_pch_encoder) {
3383 		/* Note: FDI PLL enabling _must_ be done before we enable the
3384 		 * cpu pipes, hence this is separate from all the other fdi/pch
3385 		 * enabling. */
3386 		ilk_fdi_pll_enable(new_crtc_state);
3387 	} else {
3388 		assert_fdi_tx_disabled(dev_priv, pipe);
3389 		assert_fdi_rx_disabled(dev_priv, pipe);
3390 	}
3391 
3392 	ilk_pfit_enable(new_crtc_state);
3393 
3394 	/*
3395 	 * On ILK+ LUT must be loaded before the pipe is running but with
3396 	 * clocks enabled
3397 	 */
3398 	intel_color_load_luts(new_crtc_state);
3399 	intel_color_commit(new_crtc_state);
3400 	/* update DSPCNTR to configure gamma for pipe bottom color */
3401 	intel_disable_primary_plane(new_crtc_state);
3402 
3403 	if (dev_priv->display.initial_watermarks)
3404 		dev_priv->display.initial_watermarks(state, crtc);
3405 	intel_enable_pipe(new_crtc_state);
3406 
3407 	if (new_crtc_state->has_pch_encoder)
3408 		ilk_pch_enable(state, new_crtc_state);
3409 
3410 	intel_crtc_vblank_on(new_crtc_state);
3411 
3412 	intel_encoders_enable(state, crtc);
3413 
3414 	if (HAS_PCH_CPT(dev_priv))
3415 		cpt_verify_modeset(dev_priv, pipe);
3416 
3417 	/*
3418 	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
3419 	 * And a second vblank wait is needed at least on ILK with
3420 	 * some interlaced HDMI modes. Let's do the double wait always
3421 	 * in case there are more corner cases we don't know about.
3422 	 */
3423 	if (new_crtc_state->has_pch_encoder) {
3424 		intel_wait_for_vblank(dev_priv, pipe);
3425 		intel_wait_for_vblank(dev_priv, pipe);
3426 	}
3427 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3428 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3429 }
3430 
3431 /* IPS only exists on ULT machines and is tied to pipe A. */
3432 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3433 {
3434 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
3435 }
3436 
3437 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3438 					    enum pipe pipe, bool apply)
3439 {
3440 	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3441 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3442 
3443 	if (apply)
3444 		val |= mask;
3445 	else
3446 		val &= ~mask;
3447 
3448 	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3449 }
3450 
3451 static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
3452 {
3453 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3454 	enum pipe pipe = crtc->pipe;
3455 	u32 val;
3456 
3457 	/* Wa_22010947358:adl-p */
3458 	if (IS_ALDERLAKE_P(dev_priv))
3459 		val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
3460 	else
3461 		val = MBUS_DBOX_A_CREDIT(2);
3462 
3463 	if (DISPLAY_VER(dev_priv) >= 12) {
3464 		val |= MBUS_DBOX_BW_CREDIT(2);
3465 		val |= MBUS_DBOX_B_CREDIT(12);
3466 	} else {
3467 		val |= MBUS_DBOX_BW_CREDIT(1);
3468 		val |= MBUS_DBOX_B_CREDIT(8);
3469 	}
3470 
3471 	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
3472 }
3473 
3474 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3475 {
3476 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3477 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3478 
3479 	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3480 		       HSW_LINETIME(crtc_state->linetime) |
3481 		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
3482 }
3483 
3484 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3485 {
3486 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3487 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3488 	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3489 	u32 val;
3490 
3491 	val = intel_de_read(dev_priv, reg);
3492 	val &= ~HSW_FRAME_START_DELAY_MASK;
3493 	val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3494 	intel_de_write(dev_priv, reg, val);
3495 }
3496 
3497 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
3498 					 const struct intel_crtc_state *crtc_state)
3499 {
3500 	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
3501 	struct drm_i915_private *dev_priv = to_i915(master->base.dev);
3502 	struct intel_crtc_state *master_crtc_state;
3503 	struct drm_connector_state *conn_state;
3504 	struct drm_connector *conn;
3505 	struct intel_encoder *encoder = NULL;
3506 	int i;
3507 
3508 	if (crtc_state->bigjoiner_slave)
3509 		master = crtc_state->bigjoiner_linked_crtc;
3510 
3511 	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
3512 
3513 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3514 		if (conn_state->crtc != &master->base)
3515 			continue;
3516 
3517 		encoder = to_intel_encoder(conn_state->best_encoder);
3518 		break;
3519 	}
3520 
3521 	if (!crtc_state->bigjoiner_slave) {
3522 		/* need to enable VDSC, which we skipped in pre-enable */
3523 		intel_dsc_enable(encoder, crtc_state);
3524 	} else {
3525 		/*
3526 		 * Enable sequence steps 1-7 on bigjoiner master
3527 		 */
3528 		intel_encoders_pre_pll_enable(state, master);
3529 		if (master_crtc_state->shared_dpll)
3530 			intel_enable_shared_dpll(master_crtc_state);
3531 		intel_encoders_pre_enable(state, master);
3532 
3533 		/* and DSC on slave */
3534 		intel_dsc_enable(NULL, crtc_state);
3535 	}
3536 
3537 	if (DISPLAY_VER(dev_priv) >= 13)
3538 		intel_uncompressed_joiner_enable(crtc_state);
3539 }
3540 
3541 static void hsw_crtc_enable(struct intel_atomic_state *state,
3542 			    struct intel_crtc *crtc)
3543 {
3544 	const struct intel_crtc_state *new_crtc_state =
3545 		intel_atomic_get_new_crtc_state(state, crtc);
3546 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3547 	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
3548 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3549 	bool psl_clkgate_wa;
3550 
3551 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3552 		return;
3553 
3554 	if (!new_crtc_state->bigjoiner) {
3555 		intel_encoders_pre_pll_enable(state, crtc);
3556 
3557 		if (new_crtc_state->shared_dpll)
3558 			intel_enable_shared_dpll(new_crtc_state);
3559 
3560 		intel_encoders_pre_enable(state, crtc);
3561 	} else {
3562 		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
3563 	}
3564 
3565 	intel_set_pipe_src_size(new_crtc_state);
3566 	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
3567 		bdw_set_pipemisc(new_crtc_state);
3568 
3569 	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
3570 		intel_set_transcoder_timings(new_crtc_state);
3571 
3572 		if (cpu_transcoder != TRANSCODER_EDP)
3573 			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
3574 				       new_crtc_state->pixel_multiplier - 1);
3575 
3576 		if (new_crtc_state->has_pch_encoder)
3577 			intel_cpu_transcoder_set_m_n(new_crtc_state,
3578 						     &new_crtc_state->fdi_m_n, NULL);
3579 
3580 		hsw_set_frame_start_delay(new_crtc_state);
3581 	}
3582 
3583 	if (!transcoder_is_dsi(cpu_transcoder))
3584 		hsw_set_pipeconf(new_crtc_state);
3585 
3586 	crtc->active = true;
3587 
3588 	/* Display WA #1180: WaDisableScalarClockGating: glk */
3589 	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
3590 		new_crtc_state->pch_pfit.enabled;
3591 	if (psl_clkgate_wa)
3592 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
3593 
3594 	if (DISPLAY_VER(dev_priv) >= 9)
3595 		skl_pfit_enable(new_crtc_state);
3596 	else
3597 		ilk_pfit_enable(new_crtc_state);
3598 
3599 	/*
3600 	 * On ILK+ LUT must be loaded before the pipe is running but with
3601 	 * clocks enabled
3602 	 */
3603 	intel_color_load_luts(new_crtc_state);
3604 	intel_color_commit(new_crtc_state);
3605 	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
3606 	if (DISPLAY_VER(dev_priv) < 9)
3607 		intel_disable_primary_plane(new_crtc_state);
3608 
3609 	hsw_set_linetime_wm(new_crtc_state);
3610 
3611 	if (DISPLAY_VER(dev_priv) >= 11)
3612 		icl_set_pipe_chicken(new_crtc_state);
3613 
3614 	if (dev_priv->display.initial_watermarks)
3615 		dev_priv->display.initial_watermarks(state, crtc);
3616 
3617 	if (DISPLAY_VER(dev_priv) >= 11) {
3618 		const struct intel_dbuf_state *dbuf_state =
3619 				intel_atomic_get_new_dbuf_state(state);
3620 
3621 		icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
3622 	}
3623 
3624 	if (new_crtc_state->bigjoiner_slave)
3625 		intel_crtc_vblank_on(new_crtc_state);
3626 
3627 	intel_encoders_enable(state, crtc);
3628 
3629 	if (psl_clkgate_wa) {
3630 		intel_wait_for_vblank(dev_priv, pipe);
3631 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
3632 	}
3633 
3634 	/* If we change the relative order between pipe/planes enabling, we need
3635 	 * to change the workaround. */
3636 	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
3637 	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
3638 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3639 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3640 	}
3641 }
3642 
3643 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3644 {
3645 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3646 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3647 	enum pipe pipe = crtc->pipe;
3648 
3649 	/* To avoid upsetting the power well on haswell only disable the pfit if
3650 	 * it's in use. The hw state code will make sure we get this right. */
3651 	if (!old_crtc_state->pch_pfit.enabled)
3652 		return;
3653 
3654 	intel_de_write(dev_priv, PF_CTL(pipe), 0);
3655 	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
3656 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
3657 }
3658 
3659 static void ilk_crtc_disable(struct intel_atomic_state *state,
3660 			     struct intel_crtc *crtc)
3661 {
3662 	const struct intel_crtc_state *old_crtc_state =
3663 		intel_atomic_get_old_crtc_state(state, crtc);
3664 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3665 	enum pipe pipe = crtc->pipe;
3666 
3667 	/*
3668 	 * Sometimes spurious CPU pipe underruns happen when the
3669 	 * pipe is already disabled, but FDI RX/TX is still enabled.
3670 	 * Happens at least with VGA+HDMI cloning. Suppress them.
3671 	 */
3672 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3673 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3674 
3675 	intel_encoders_disable(state, crtc);
3676 
3677 	intel_crtc_vblank_off(old_crtc_state);
3678 
3679 	intel_disable_pipe(old_crtc_state);
3680 
3681 	ilk_pfit_disable(old_crtc_state);
3682 
3683 	if (old_crtc_state->has_pch_encoder)
3684 		ilk_fdi_disable(crtc);
3685 
3686 	intel_encoders_post_disable(state, crtc);
3687 
3688 	if (old_crtc_state->has_pch_encoder) {
3689 		ilk_disable_pch_transcoder(dev_priv, pipe);
3690 
3691 		if (HAS_PCH_CPT(dev_priv)) {
3692 			i915_reg_t reg;
3693 			u32 temp;
3694 
3695 			/* disable TRANS_DP_CTL */
3696 			reg = TRANS_DP_CTL(pipe);
3697 			temp = intel_de_read(dev_priv, reg);
3698 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3699 				  TRANS_DP_PORT_SEL_MASK);
3700 			temp |= TRANS_DP_PORT_SEL_NONE;
3701 			intel_de_write(dev_priv, reg, temp);
3702 
3703 			/* disable DPLL_SEL */
3704 			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
3705 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
3706 			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
3707 		}
3708 
3709 		ilk_fdi_pll_disable(crtc);
3710 	}
3711 
3712 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3713 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3714 }
3715 
3716 static void hsw_crtc_disable(struct intel_atomic_state *state,
3717 			     struct intel_crtc *crtc)
3718 {
3719 	/*
3720 	 * FIXME collapse everything to one hook.
3721 	 * Need care with mst->ddi interactions.
3722 	 */
3723 	intel_encoders_disable(state, crtc);
3724 	intel_encoders_post_disable(state, crtc);
3725 }
3726 
3727 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
3728 {
3729 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3730 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3731 
3732 	if (!crtc_state->gmch_pfit.control)
3733 		return;
3734 
3735 	/*
3736 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
3737 	 * according to register description and PRM.
3738 	 */
3739 	drm_WARN_ON(&dev_priv->drm,
3740 		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
3741 	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
3742 
3743 	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
3744 		       crtc_state->gmch_pfit.pgm_ratios);
3745 	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
3746 
3747 	/* Border color in case we don't scale up to the full screen. Black by
3748 	 * default, change to something else for debugging. */
3749 	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
3750 }
3751 
3752 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
3753 {
3754 	if (phy == PHY_NONE)
3755 		return false;
3756 	else if (IS_DG2(dev_priv))
3757 		/*
3758 		 * DG2 outputs labelled as "combo PHY" in the bspec use
3759 		 * SNPS PHYs with completely different programming,
3760 		 * hence we always return false here.
3761 		 */
3762 		return false;
3763 	else if (IS_ALDERLAKE_S(dev_priv))
3764 		return phy <= PHY_E;
3765 	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
3766 		return phy <= PHY_D;
3767 	else if (IS_JSL_EHL(dev_priv))
3768 		return phy <= PHY_C;
3769 	else if (DISPLAY_VER(dev_priv) >= 11)
3770 		return phy <= PHY_B;
3771 	else
3772 		return false;
3773 }
3774 
3775 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
3776 {
3777 	if (IS_DG2(dev_priv))
3778 		/* DG2's "TC1" output uses a SNPS PHY */
3779 		return false;
3780 	else if (IS_ALDERLAKE_P(dev_priv))
3781 		return phy >= PHY_F && phy <= PHY_I;
3782 	else if (IS_TIGERLAKE(dev_priv))
3783 		return phy >= PHY_D && phy <= PHY_I;
3784 	else if (IS_ICELAKE(dev_priv))
3785 		return phy >= PHY_C && phy <= PHY_F;
3786 	else
3787 		return false;
3788 }
3789 
3790 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
3791 {
3792 	if (phy == PHY_NONE)
3793 		return false;
3794 	else if (IS_DG2(dev_priv))
3795 		/*
3796 		 * All four "combo" ports and the TC1 port (PHY E) use
3797 		 * Synopsis PHYs.
3798 		 */
3799 		return phy <= PHY_E;
3800 
3801 	return false;
3802 }
3803 
3804 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
3805 {
3806 	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
3807 		return PHY_D + port - PORT_D_XELPD;
3808 	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
3809 		return PHY_F + port - PORT_TC1;
3810 	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
3811 		return PHY_B + port - PORT_TC1;
3812 	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
3813 		return PHY_C + port - PORT_TC1;
3814 	else if (IS_JSL_EHL(i915) && port == PORT_D)
3815 		return PHY_A;
3816 
3817 	return PHY_A + port - PORT_A;
3818 }
3819 
3820 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
3821 {
3822 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
3823 		return TC_PORT_NONE;
3824 
3825 	if (DISPLAY_VER(dev_priv) >= 12)
3826 		return TC_PORT_1 + port - PORT_TC1;
3827 	else
3828 		return TC_PORT_1 + port - PORT_C;
3829 }
3830 
3831 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
3832 {
3833 	switch (port) {
3834 	case PORT_A:
3835 		return POWER_DOMAIN_PORT_DDI_A_LANES;
3836 	case PORT_B:
3837 		return POWER_DOMAIN_PORT_DDI_B_LANES;
3838 	case PORT_C:
3839 		return POWER_DOMAIN_PORT_DDI_C_LANES;
3840 	case PORT_D:
3841 		return POWER_DOMAIN_PORT_DDI_D_LANES;
3842 	case PORT_E:
3843 		return POWER_DOMAIN_PORT_DDI_E_LANES;
3844 	case PORT_F:
3845 		return POWER_DOMAIN_PORT_DDI_F_LANES;
3846 	case PORT_G:
3847 		return POWER_DOMAIN_PORT_DDI_G_LANES;
3848 	case PORT_H:
3849 		return POWER_DOMAIN_PORT_DDI_H_LANES;
3850 	case PORT_I:
3851 		return POWER_DOMAIN_PORT_DDI_I_LANES;
3852 	default:
3853 		MISSING_CASE(port);
3854 		return POWER_DOMAIN_PORT_OTHER;
3855 	}
3856 }
3857 
3858 enum intel_display_power_domain
3859 intel_aux_power_domain(struct intel_digital_port *dig_port)
3860 {
3861 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3862 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
3863 
3864 	if (intel_phy_is_tc(dev_priv, phy) &&
3865 	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
3866 		switch (dig_port->aux_ch) {
3867 		case AUX_CH_C:
3868 			return POWER_DOMAIN_AUX_C_TBT;
3869 		case AUX_CH_D:
3870 			return POWER_DOMAIN_AUX_D_TBT;
3871 		case AUX_CH_E:
3872 			return POWER_DOMAIN_AUX_E_TBT;
3873 		case AUX_CH_F:
3874 			return POWER_DOMAIN_AUX_F_TBT;
3875 		case AUX_CH_G:
3876 			return POWER_DOMAIN_AUX_G_TBT;
3877 		case AUX_CH_H:
3878 			return POWER_DOMAIN_AUX_H_TBT;
3879 		case AUX_CH_I:
3880 			return POWER_DOMAIN_AUX_I_TBT;
3881 		default:
3882 			MISSING_CASE(dig_port->aux_ch);
3883 			return POWER_DOMAIN_AUX_C_TBT;
3884 		}
3885 	}
3886 
3887 	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
3888 }
3889 
3890 /*
3891  * Converts aux_ch to power_domain without caring about TBT ports for that use
3892  * intel_aux_power_domain()
3893  */
3894 enum intel_display_power_domain
3895 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
3896 {
3897 	switch (aux_ch) {
3898 	case AUX_CH_A:
3899 		return POWER_DOMAIN_AUX_A;
3900 	case AUX_CH_B:
3901 		return POWER_DOMAIN_AUX_B;
3902 	case AUX_CH_C:
3903 		return POWER_DOMAIN_AUX_C;
3904 	case AUX_CH_D:
3905 		return POWER_DOMAIN_AUX_D;
3906 	case AUX_CH_E:
3907 		return POWER_DOMAIN_AUX_E;
3908 	case AUX_CH_F:
3909 		return POWER_DOMAIN_AUX_F;
3910 	case AUX_CH_G:
3911 		return POWER_DOMAIN_AUX_G;
3912 	case AUX_CH_H:
3913 		return POWER_DOMAIN_AUX_H;
3914 	case AUX_CH_I:
3915 		return POWER_DOMAIN_AUX_I;
3916 	default:
3917 		MISSING_CASE(aux_ch);
3918 		return POWER_DOMAIN_AUX_A;
3919 	}
3920 }
3921 
3922 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3923 {
3924 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3925 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3926 	struct drm_encoder *encoder;
3927 	enum pipe pipe = crtc->pipe;
3928 	u64 mask;
3929 	enum transcoder transcoder = crtc_state->cpu_transcoder;
3930 
3931 	if (!crtc_state->hw.active)
3932 		return 0;
3933 
3934 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
3935 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
3936 	if (crtc_state->pch_pfit.enabled ||
3937 	    crtc_state->pch_pfit.force_thru)
3938 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
3939 
3940 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
3941 				  crtc_state->uapi.encoder_mask) {
3942 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3943 
3944 		mask |= BIT_ULL(intel_encoder->power_domain);
3945 	}
3946 
3947 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
3948 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
3949 
3950 	if (crtc_state->shared_dpll)
3951 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
3952 
3953 	if (crtc_state->dsc.compression_enable)
3954 		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
3955 
3956 	return mask;
3957 }
3958 
3959 static u64
3960 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3961 {
3962 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3963 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3964 	enum intel_display_power_domain domain;
3965 	u64 domains, new_domains, old_domains;
3966 
3967 	domains = get_crtc_power_domains(crtc_state);
3968 
3969 	new_domains = domains & ~crtc->enabled_power_domains.mask;
3970 	old_domains = crtc->enabled_power_domains.mask & ~domains;
3971 
3972 	for_each_power_domain(domain, new_domains)
3973 		intel_display_power_get_in_set(dev_priv,
3974 					       &crtc->enabled_power_domains,
3975 					       domain);
3976 
3977 	return old_domains;
3978 }
3979 
3980 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
3981 					   u64 domains)
3982 {
3983 	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
3984 					    &crtc->enabled_power_domains,
3985 					    domains);
3986 }
3987 
3988 static void valleyview_crtc_enable(struct intel_atomic_state *state,
3989 				   struct intel_crtc *crtc)
3990 {
3991 	const struct intel_crtc_state *new_crtc_state =
3992 		intel_atomic_get_new_crtc_state(state, crtc);
3993 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3994 	enum pipe pipe = crtc->pipe;
3995 
3996 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3997 		return;
3998 
3999 	if (intel_crtc_has_dp_encoder(new_crtc_state))
4000 		intel_dp_set_m_n(new_crtc_state, M1_N1);
4001 
4002 	intel_set_transcoder_timings(new_crtc_state);
4003 	intel_set_pipe_src_size(new_crtc_state);
4004 
4005 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
4006 		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
4007 		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
4008 	}
4009 
4010 	i9xx_set_pipeconf(new_crtc_state);
4011 
4012 	crtc->active = true;
4013 
4014 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4015 
4016 	intel_encoders_pre_pll_enable(state, crtc);
4017 
4018 	if (IS_CHERRYVIEW(dev_priv)) {
4019 		chv_prepare_pll(crtc, new_crtc_state);
4020 		chv_enable_pll(crtc, new_crtc_state);
4021 	} else {
4022 		vlv_prepare_pll(crtc, new_crtc_state);
4023 		vlv_enable_pll(crtc, new_crtc_state);
4024 	}
4025 
4026 	intel_encoders_pre_enable(state, crtc);
4027 
4028 	i9xx_pfit_enable(new_crtc_state);
4029 
4030 	intel_color_load_luts(new_crtc_state);
4031 	intel_color_commit(new_crtc_state);
4032 	/* update DSPCNTR to configure gamma for pipe bottom color */
4033 	intel_disable_primary_plane(new_crtc_state);
4034 
4035 	dev_priv->display.initial_watermarks(state, crtc);
4036 	intel_enable_pipe(new_crtc_state);
4037 
4038 	intel_crtc_vblank_on(new_crtc_state);
4039 
4040 	intel_encoders_enable(state, crtc);
4041 }
4042 
4043 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
4044 {
4045 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4046 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4047 
4048 	intel_de_write(dev_priv, FP0(crtc->pipe),
4049 		       crtc_state->dpll_hw_state.fp0);
4050 	intel_de_write(dev_priv, FP1(crtc->pipe),
4051 		       crtc_state->dpll_hw_state.fp1);
4052 }
4053 
4054 static void i9xx_crtc_enable(struct intel_atomic_state *state,
4055 			     struct intel_crtc *crtc)
4056 {
4057 	const struct intel_crtc_state *new_crtc_state =
4058 		intel_atomic_get_new_crtc_state(state, crtc);
4059 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4060 	enum pipe pipe = crtc->pipe;
4061 
4062 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
4063 		return;
4064 
4065 	i9xx_set_pll_dividers(new_crtc_state);
4066 
4067 	if (intel_crtc_has_dp_encoder(new_crtc_state))
4068 		intel_dp_set_m_n(new_crtc_state, M1_N1);
4069 
4070 	intel_set_transcoder_timings(new_crtc_state);
4071 	intel_set_pipe_src_size(new_crtc_state);
4072 
4073 	i9xx_set_pipeconf(new_crtc_state);
4074 
4075 	crtc->active = true;
4076 
4077 	if (DISPLAY_VER(dev_priv) != 2)
4078 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4079 
4080 	intel_encoders_pre_enable(state, crtc);
4081 
4082 	i9xx_enable_pll(crtc, new_crtc_state);
4083 
4084 	i9xx_pfit_enable(new_crtc_state);
4085 
4086 	intel_color_load_luts(new_crtc_state);
4087 	intel_color_commit(new_crtc_state);
4088 	/* update DSPCNTR to configure gamma for pipe bottom color */
4089 	intel_disable_primary_plane(new_crtc_state);
4090 
4091 	if (dev_priv->display.initial_watermarks)
4092 		dev_priv->display.initial_watermarks(state, crtc);
4093 	else
4094 		intel_update_watermarks(crtc);
4095 	intel_enable_pipe(new_crtc_state);
4096 
4097 	intel_crtc_vblank_on(new_crtc_state);
4098 
4099 	intel_encoders_enable(state, crtc);
4100 
4101 	/* prevents spurious underruns */
4102 	if (DISPLAY_VER(dev_priv) == 2)
4103 		intel_wait_for_vblank(dev_priv, pipe);
4104 }
4105 
4106 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
4107 {
4108 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4109 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4110 
4111 	if (!old_crtc_state->gmch_pfit.control)
4112 		return;
4113 
4114 	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
4115 
4116 	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
4117 		    intel_de_read(dev_priv, PFIT_CONTROL));
4118 	intel_de_write(dev_priv, PFIT_CONTROL, 0);
4119 }
4120 
4121 static void i9xx_crtc_disable(struct intel_atomic_state *state,
4122 			      struct intel_crtc *crtc)
4123 {
4124 	struct intel_crtc_state *old_crtc_state =
4125 		intel_atomic_get_old_crtc_state(state, crtc);
4126 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4127 	enum pipe pipe = crtc->pipe;
4128 
4129 	/*
4130 	 * On gen2 planes are double buffered but the pipe isn't, so we must
4131 	 * wait for planes to fully turn off before disabling the pipe.
4132 	 */
4133 	if (DISPLAY_VER(dev_priv) == 2)
4134 		intel_wait_for_vblank(dev_priv, pipe);
4135 
4136 	intel_encoders_disable(state, crtc);
4137 
4138 	intel_crtc_vblank_off(old_crtc_state);
4139 
4140 	intel_disable_pipe(old_crtc_state);
4141 
4142 	i9xx_pfit_disable(old_crtc_state);
4143 
4144 	intel_encoders_post_disable(state, crtc);
4145 
4146 	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
4147 		if (IS_CHERRYVIEW(dev_priv))
4148 			chv_disable_pll(dev_priv, pipe);
4149 		else if (IS_VALLEYVIEW(dev_priv))
4150 			vlv_disable_pll(dev_priv, pipe);
4151 		else
4152 			i9xx_disable_pll(old_crtc_state);
4153 	}
4154 
4155 	intel_encoders_post_pll_disable(state, crtc);
4156 
4157 	if (DISPLAY_VER(dev_priv) != 2)
4158 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4159 
4160 	if (!dev_priv->display.initial_watermarks)
4161 		intel_update_watermarks(crtc);
4162 
4163 	/* clock the pipe down to 640x480@60 to potentially save power */
4164 	if (IS_I830(dev_priv))
4165 		i830_enable_pipe(dev_priv, pipe);
4166 }
4167 
4168 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
4169 					struct drm_modeset_acquire_ctx *ctx)
4170 {
4171 	struct intel_encoder *encoder;
4172 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4173 	struct intel_bw_state *bw_state =
4174 		to_intel_bw_state(dev_priv->bw_obj.state);
4175 	struct intel_cdclk_state *cdclk_state =
4176 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
4177 	struct intel_dbuf_state *dbuf_state =
4178 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
4179 	struct intel_crtc_state *crtc_state =
4180 		to_intel_crtc_state(crtc->base.state);
4181 	struct intel_plane *plane;
4182 	struct drm_atomic_state *state;
4183 	struct intel_crtc_state *temp_crtc_state;
4184 	enum pipe pipe = crtc->pipe;
4185 	int ret;
4186 
4187 	if (!crtc_state->hw.active)
4188 		return;
4189 
4190 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
4191 		const struct intel_plane_state *plane_state =
4192 			to_intel_plane_state(plane->base.state);
4193 
4194 		if (plane_state->uapi.visible)
4195 			intel_plane_disable_noatomic(crtc, plane);
4196 	}
4197 
4198 	state = drm_atomic_state_alloc(&dev_priv->drm);
4199 	if (!state) {
4200 		drm_dbg_kms(&dev_priv->drm,
4201 			    "failed to disable [CRTC:%d:%s], out of memory",
4202 			    crtc->base.base.id, crtc->base.name);
4203 		return;
4204 	}
4205 
4206 	state->acquire_ctx = ctx;
4207 
4208 	/* Everything's already locked, -EDEADLK can't happen. */
4209 	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
4210 	ret = drm_atomic_add_affected_connectors(state, &crtc->base);
4211 
4212 	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
4213 
4214 	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
4215 
4216 	drm_atomic_state_put(state);
4217 
4218 	drm_dbg_kms(&dev_priv->drm,
4219 		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
4220 		    crtc->base.base.id, crtc->base.name);
4221 
4222 	crtc->active = false;
4223 	crtc->base.enabled = false;
4224 
4225 	drm_WARN_ON(&dev_priv->drm,
4226 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
4227 	crtc_state->uapi.active = false;
4228 	crtc_state->uapi.connector_mask = 0;
4229 	crtc_state->uapi.encoder_mask = 0;
4230 	intel_crtc_free_hw_state(crtc_state);
4231 	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
4232 
4233 	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
4234 		encoder->base.crtc = NULL;
4235 
4236 	intel_fbc_disable(crtc);
4237 	intel_update_watermarks(crtc);
4238 	intel_disable_shared_dpll(crtc_state);
4239 
4240 	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
4241 
4242 	dev_priv->active_pipes &= ~BIT(pipe);
4243 	cdclk_state->min_cdclk[pipe] = 0;
4244 	cdclk_state->min_voltage_level[pipe] = 0;
4245 	cdclk_state->active_pipes &= ~BIT(pipe);
4246 
4247 	dbuf_state->active_pipes &= ~BIT(pipe);
4248 
4249 	bw_state->data_rate[pipe] = 0;
4250 	bw_state->num_active_planes[pipe] = 0;
4251 }
4252 
4253 /*
4254  * turn all crtc's off, but do not adjust state
4255  * This has to be paired with a call to intel_modeset_setup_hw_state.
4256  */
4257 int intel_display_suspend(struct drm_device *dev)
4258 {
4259 	struct drm_i915_private *dev_priv = to_i915(dev);
4260 	struct drm_atomic_state *state;
4261 	int ret;
4262 
4263 	if (!HAS_DISPLAY(dev_priv))
4264 		return 0;
4265 
4266 	state = drm_atomic_helper_suspend(dev);
4267 	ret = PTR_ERR_OR_ZERO(state);
4268 	if (ret)
4269 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
4270 			ret);
4271 	else
4272 		dev_priv->modeset_restore_state = state;
4273 	return ret;
4274 }
4275 
4276 void intel_encoder_destroy(struct drm_encoder *encoder)
4277 {
4278 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4279 
4280 	drm_encoder_cleanup(encoder);
4281 	kfree(intel_encoder);
4282 }
4283 
4284 /* Cross check the actual hw state with our own modeset state tracking (and it's
4285  * internal consistency). */
4286 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
4287 					 struct drm_connector_state *conn_state)
4288 {
4289 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
4290 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
4291 
4292 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
4293 		    connector->base.base.id, connector->base.name);
4294 
4295 	if (connector->get_hw_state(connector)) {
4296 		struct intel_encoder *encoder = intel_attached_encoder(connector);
4297 
4298 		I915_STATE_WARN(!crtc_state,
4299 			 "connector enabled without attached crtc\n");
4300 
4301 		if (!crtc_state)
4302 			return;
4303 
4304 		I915_STATE_WARN(!crtc_state->hw.active,
4305 				"connector is active, but attached crtc isn't\n");
4306 
4307 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
4308 			return;
4309 
4310 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
4311 			"atomic encoder doesn't match attached encoder\n");
4312 
4313 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
4314 			"attached encoder crtc differs from connector crtc\n");
4315 	} else {
4316 		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
4317 				"attached crtc is active, but connector isn't\n");
4318 		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
4319 			"best encoder set without crtc!\n");
4320 	}
4321 }
4322 
4323 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
4324 {
4325 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4326 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4327 
4328 	/* IPS only exists on ULT machines and is tied to pipe A. */
4329 	if (!hsw_crtc_supports_ips(crtc))
4330 		return false;
4331 
4332 	if (!dev_priv->params.enable_ips)
4333 		return false;
4334 
4335 	if (crtc_state->pipe_bpp > 24)
4336 		return false;
4337 
4338 	/*
4339 	 * We compare against max which means we must take
4340 	 * the increased cdclk requirement into account when
4341 	 * calculating the new cdclk.
4342 	 *
4343 	 * Should measure whether using a lower cdclk w/o IPS
4344 	 */
4345 	if (IS_BROADWELL(dev_priv) &&
4346 	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
4347 		return false;
4348 
4349 	return true;
4350 }
4351 
4352 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
4353 {
4354 	struct drm_i915_private *dev_priv =
4355 		to_i915(crtc_state->uapi.crtc->dev);
4356 	struct intel_atomic_state *state =
4357 		to_intel_atomic_state(crtc_state->uapi.state);
4358 
4359 	crtc_state->ips_enabled = false;
4360 
4361 	if (!hsw_crtc_state_ips_capable(crtc_state))
4362 		return 0;
4363 
4364 	/*
4365 	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
4366 	 * enabled and disabled dynamically based on package C states,
4367 	 * user space can't make reliable use of the CRCs, so let's just
4368 	 * completely disable it.
4369 	 */
4370 	if (crtc_state->crc_enabled)
4371 		return 0;
4372 
4373 	/* IPS should be fine as long as at least one plane is enabled. */
4374 	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
4375 		return 0;
4376 
4377 	if (IS_BROADWELL(dev_priv)) {
4378 		const struct intel_cdclk_state *cdclk_state;
4379 
4380 		cdclk_state = intel_atomic_get_cdclk_state(state);
4381 		if (IS_ERR(cdclk_state))
4382 			return PTR_ERR(cdclk_state);
4383 
4384 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
4385 		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
4386 			return 0;
4387 	}
4388 
4389 	crtc_state->ips_enabled = true;
4390 
4391 	return 0;
4392 }
4393 
4394 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
4395 {
4396 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4397 
4398 	/* GDG double wide on either pipe, otherwise pipe A only */
4399 	return DISPLAY_VER(dev_priv) < 4 &&
4400 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
4401 }
4402 
4403 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
4404 {
4405 	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
4406 	struct drm_rect src;
4407 
4408 	/*
4409 	 * We only use IF-ID interlacing. If we ever use
4410 	 * PF-ID we'll need to adjust the pixel_rate here.
4411 	 */
4412 
4413 	if (!crtc_state->pch_pfit.enabled)
4414 		return pixel_rate;
4415 
4416 	drm_rect_init(&src, 0, 0,
4417 		      crtc_state->pipe_src_w << 16,
4418 		      crtc_state->pipe_src_h << 16);
4419 
4420 	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
4421 				   pixel_rate);
4422 }
4423 
4424 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
4425 					 const struct drm_display_mode *timings)
4426 {
4427 	mode->hdisplay = timings->crtc_hdisplay;
4428 	mode->htotal = timings->crtc_htotal;
4429 	mode->hsync_start = timings->crtc_hsync_start;
4430 	mode->hsync_end = timings->crtc_hsync_end;
4431 
4432 	mode->vdisplay = timings->crtc_vdisplay;
4433 	mode->vtotal = timings->crtc_vtotal;
4434 	mode->vsync_start = timings->crtc_vsync_start;
4435 	mode->vsync_end = timings->crtc_vsync_end;
4436 
4437 	mode->flags = timings->flags;
4438 	mode->type = DRM_MODE_TYPE_DRIVER;
4439 
4440 	mode->clock = timings->crtc_clock;
4441 
4442 	drm_mode_set_name(mode);
4443 }
4444 
4445 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
4446 {
4447 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4448 
4449 	if (HAS_GMCH(dev_priv))
4450 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
4451 		crtc_state->pixel_rate =
4452 			crtc_state->hw.pipe_mode.crtc_clock;
4453 	else
4454 		crtc_state->pixel_rate =
4455 			ilk_pipe_pixel_rate(crtc_state);
4456 }
4457 
4458 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
4459 {
4460 	struct drm_display_mode *mode = &crtc_state->hw.mode;
4461 	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
4462 	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4463 
4464 	drm_mode_copy(pipe_mode, adjusted_mode);
4465 
4466 	if (crtc_state->bigjoiner) {
4467 		/*
4468 		 * transcoder is programmed to the full mode,
4469 		 * but pipe timings are half of the transcoder mode
4470 		 */
4471 		pipe_mode->crtc_hdisplay /= 2;
4472 		pipe_mode->crtc_hblank_start /= 2;
4473 		pipe_mode->crtc_hblank_end /= 2;
4474 		pipe_mode->crtc_hsync_start /= 2;
4475 		pipe_mode->crtc_hsync_end /= 2;
4476 		pipe_mode->crtc_htotal /= 2;
4477 		pipe_mode->crtc_clock /= 2;
4478 	}
4479 
4480 	if (crtc_state->splitter.enable) {
4481 		int n = crtc_state->splitter.link_count;
4482 		int overlap = crtc_state->splitter.pixel_overlap;
4483 
4484 		/*
4485 		 * eDP MSO uses segment timings from EDID for transcoder
4486 		 * timings, but full mode for everything else.
4487 		 *
4488 		 * h_full = (h_segment - pixel_overlap) * link_count
4489 		 */
4490 		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4491 		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4492 		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4493 		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4494 		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4495 		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4496 		pipe_mode->crtc_clock *= n;
4497 
4498 		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4499 		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
4500 	} else {
4501 		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4502 		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
4503 	}
4504 
4505 	intel_crtc_compute_pixel_rate(crtc_state);
4506 
4507 	drm_mode_copy(mode, adjusted_mode);
4508 	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
4509 	mode->vdisplay = crtc_state->pipe_src_h;
4510 }
4511 
4512 static void intel_encoder_get_config(struct intel_encoder *encoder,
4513 				     struct intel_crtc_state *crtc_state)
4514 {
4515 	encoder->get_config(encoder, crtc_state);
4516 
4517 	intel_crtc_readout_derived_state(crtc_state);
4518 }
4519 
4520 static int intel_crtc_compute_config(struct intel_crtc *crtc,
4521 				     struct intel_crtc_state *pipe_config)
4522 {
4523 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4524 	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
4525 	int clock_limit = dev_priv->max_dotclk_freq;
4526 
4527 	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
4528 
4529 	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
4530 	if (pipe_config->bigjoiner) {
4531 		pipe_mode->crtc_clock /= 2;
4532 		pipe_mode->crtc_hdisplay /= 2;
4533 		pipe_mode->crtc_hblank_start /= 2;
4534 		pipe_mode->crtc_hblank_end /= 2;
4535 		pipe_mode->crtc_hsync_start /= 2;
4536 		pipe_mode->crtc_hsync_end /= 2;
4537 		pipe_mode->crtc_htotal /= 2;
4538 		pipe_config->pipe_src_w /= 2;
4539 	}
4540 
4541 	if (pipe_config->splitter.enable) {
4542 		int n = pipe_config->splitter.link_count;
4543 		int overlap = pipe_config->splitter.pixel_overlap;
4544 
4545 		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4546 		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4547 		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4548 		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4549 		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4550 		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4551 		pipe_mode->crtc_clock *= n;
4552 	}
4553 
4554 	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4555 
4556 	if (DISPLAY_VER(dev_priv) < 4) {
4557 		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
4558 
4559 		/*
4560 		 * Enable double wide mode when the dot clock
4561 		 * is > 90% of the (display) core speed.
4562 		 */
4563 		if (intel_crtc_supports_double_wide(crtc) &&
4564 		    pipe_mode->crtc_clock > clock_limit) {
4565 			clock_limit = dev_priv->max_dotclk_freq;
4566 			pipe_config->double_wide = true;
4567 		}
4568 	}
4569 
4570 	if (pipe_mode->crtc_clock > clock_limit) {
4571 		drm_dbg_kms(&dev_priv->drm,
4572 			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
4573 			    pipe_mode->crtc_clock, clock_limit,
4574 			    yesno(pipe_config->double_wide));
4575 		return -EINVAL;
4576 	}
4577 
4578 	/*
4579 	 * Pipe horizontal size must be even in:
4580 	 * - DVO ganged mode
4581 	 * - LVDS dual channel mode
4582 	 * - Double wide pipe
4583 	 */
4584 	if (pipe_config->pipe_src_w & 1) {
4585 		if (pipe_config->double_wide) {
4586 			drm_dbg_kms(&dev_priv->drm,
4587 				    "Odd pipe source width not supported with double wide pipe\n");
4588 			return -EINVAL;
4589 		}
4590 
4591 		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
4592 		    intel_is_dual_link_lvds(dev_priv)) {
4593 			drm_dbg_kms(&dev_priv->drm,
4594 				    "Odd pipe source width not supported with dual link LVDS\n");
4595 			return -EINVAL;
4596 		}
4597 	}
4598 
4599 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
4600 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4601 	 */
4602 	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
4603 	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
4604 		return -EINVAL;
4605 
4606 	intel_crtc_compute_pixel_rate(pipe_config);
4607 
4608 	if (pipe_config->has_pch_encoder)
4609 		return ilk_fdi_compute_config(crtc, pipe_config);
4610 
4611 	return 0;
4612 }
4613 
4614 static void
4615 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4616 {
4617 	while (*num > DATA_LINK_M_N_MASK ||
4618 	       *den > DATA_LINK_M_N_MASK) {
4619 		*num >>= 1;
4620 		*den >>= 1;
4621 	}
4622 }
4623 
4624 static void compute_m_n(unsigned int m, unsigned int n,
4625 			u32 *ret_m, u32 *ret_n,
4626 			bool constant_n)
4627 {
4628 	/*
4629 	 * Several DP dongles in particular seem to be fussy about
4630 	 * too large link M/N values. Give N value as 0x8000 that
4631 	 * should be acceptable by specific devices. 0x8000 is the
4632 	 * specified fixed N value for asynchronous clock mode,
4633 	 * which the devices expect also in synchronous clock mode.
4634 	 */
4635 	if (constant_n)
4636 		*ret_n = DP_LINK_CONSTANT_N_VALUE;
4637 	else
4638 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4639 
4640 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4641 	intel_reduce_m_n_ratio(ret_m, ret_n);
4642 }
4643 
4644 void
4645 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
4646 		       int pixel_clock, int link_clock,
4647 		       struct intel_link_m_n *m_n,
4648 		       bool constant_n, bool fec_enable)
4649 {
4650 	u32 data_clock = bits_per_pixel * pixel_clock;
4651 
4652 	if (fec_enable)
4653 		data_clock = intel_dp_mode_to_fec_clock(data_clock);
4654 
4655 	m_n->tu = 64;
4656 	compute_m_n(data_clock,
4657 		    link_clock * nlanes * 8,
4658 		    &m_n->gmch_m, &m_n->gmch_n,
4659 		    constant_n);
4660 
4661 	compute_m_n(pixel_clock, link_clock,
4662 		    &m_n->link_m, &m_n->link_n,
4663 		    constant_n);
4664 }
4665 
4666 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
4667 {
4668 	/*
4669 	 * There may be no VBT; and if the BIOS enabled SSC we can
4670 	 * just keep using it to avoid unnecessary flicker.  Whereas if the
4671 	 * BIOS isn't using it, don't assume it will work even if the VBT
4672 	 * indicates as much.
4673 	 */
4674 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
4675 		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
4676 						       PCH_DREF_CONTROL) &
4677 			DREF_SSC1_ENABLE;
4678 
4679 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
4680 			drm_dbg_kms(&dev_priv->drm,
4681 				    "SSC %s by BIOS, overriding VBT which says %s\n",
4682 				    enableddisabled(bios_lvds_use_ssc),
4683 				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
4684 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
4685 		}
4686 	}
4687 }
4688 
4689 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4690 					 const struct intel_link_m_n *m_n)
4691 {
4692 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4693 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4694 	enum pipe pipe = crtc->pipe;
4695 
4696 	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
4697 		       TU_SIZE(m_n->tu) | m_n->gmch_m);
4698 	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4699 	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4700 	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4701 }
4702 
4703 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
4704 				 enum transcoder transcoder)
4705 {
4706 	if (IS_HASWELL(dev_priv))
4707 		return transcoder == TRANSCODER_EDP;
4708 
4709 	/*
4710 	 * Strictly speaking some registers are available before
4711 	 * gen7, but we only support DRRS on gen7+
4712 	 */
4713 	return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
4714 }
4715 
4716 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4717 					 const struct intel_link_m_n *m_n,
4718 					 const struct intel_link_m_n *m2_n2)
4719 {
4720 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4721 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4722 	enum pipe pipe = crtc->pipe;
4723 	enum transcoder transcoder = crtc_state->cpu_transcoder;
4724 
4725 	if (DISPLAY_VER(dev_priv) >= 5) {
4726 		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
4727 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4728 		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
4729 			       m_n->gmch_n);
4730 		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
4731 			       m_n->link_m);
4732 		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
4733 			       m_n->link_n);
4734 		/*
4735 		 *  M2_N2 registers are set only if DRRS is supported
4736 		 * (to make sure the registers are not unnecessarily accessed).
4737 		 */
4738 		if (m2_n2 && crtc_state->has_drrs &&
4739 		    transcoder_has_m2_n2(dev_priv, transcoder)) {
4740 			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
4741 				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
4742 			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
4743 				       m2_n2->gmch_n);
4744 			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
4745 				       m2_n2->link_m);
4746 			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
4747 				       m2_n2->link_n);
4748 		}
4749 	} else {
4750 		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
4751 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4752 		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4753 		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
4754 		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
4755 	}
4756 }
4757 
4758 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
4759 {
4760 	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
4761 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4762 
4763 	if (m_n == M1_N1) {
4764 		dp_m_n = &crtc_state->dp_m_n;
4765 		dp_m2_n2 = &crtc_state->dp_m2_n2;
4766 	} else if (m_n == M2_N2) {
4767 
4768 		/*
4769 		 * M2_N2 registers are not supported. Hence m2_n2 divider value
4770 		 * needs to be programmed into M1_N1.
4771 		 */
4772 		dp_m_n = &crtc_state->dp_m2_n2;
4773 	} else {
4774 		drm_err(&i915->drm, "Unsupported divider value\n");
4775 		return;
4776 	}
4777 
4778 	if (crtc_state->has_pch_encoder)
4779 		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
4780 	else
4781 		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
4782 }
4783 
4784 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
4785 {
4786 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4787 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4788 	enum pipe pipe = crtc->pipe;
4789 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4790 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4791 	u32 crtc_vtotal, crtc_vblank_end;
4792 	int vsyncshift = 0;
4793 
4794 	/* We need to be careful not to changed the adjusted mode, for otherwise
4795 	 * the hw state checker will get angry at the mismatch. */
4796 	crtc_vtotal = adjusted_mode->crtc_vtotal;
4797 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
4798 
4799 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4800 		/* the chip adds 2 halflines automatically */
4801 		crtc_vtotal -= 1;
4802 		crtc_vblank_end -= 1;
4803 
4804 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4805 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
4806 		else
4807 			vsyncshift = adjusted_mode->crtc_hsync_start -
4808 				adjusted_mode->crtc_htotal / 2;
4809 		if (vsyncshift < 0)
4810 			vsyncshift += adjusted_mode->crtc_htotal;
4811 	}
4812 
4813 	if (DISPLAY_VER(dev_priv) > 3)
4814 		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
4815 		               vsyncshift);
4816 
4817 	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
4818 		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
4819 	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
4820 		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
4821 	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
4822 		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
4823 
4824 	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
4825 		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
4826 	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
4827 		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
4828 	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
4829 		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
4830 
4831 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4832 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4833 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4834 	 * bits. */
4835 	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
4836 	    (pipe == PIPE_B || pipe == PIPE_C))
4837 		intel_de_write(dev_priv, VTOTAL(pipe),
4838 		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
4839 
4840 }
4841 
4842 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
4843 {
4844 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4845 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4846 	enum pipe pipe = crtc->pipe;
4847 
4848 	/* pipesrc controls the size that is scaled from, which should
4849 	 * always be the user's requested size.
4850 	 */
4851 	intel_de_write(dev_priv, PIPESRC(pipe),
4852 		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
4853 }
4854 
4855 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
4856 {
4857 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4858 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4859 
4860 	if (DISPLAY_VER(dev_priv) == 2)
4861 		return false;
4862 
4863 	if (DISPLAY_VER(dev_priv) >= 9 ||
4864 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4865 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
4866 	else
4867 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
4868 }
4869 
4870 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
4871 					 struct intel_crtc_state *pipe_config)
4872 {
4873 	struct drm_device *dev = crtc->base.dev;
4874 	struct drm_i915_private *dev_priv = to_i915(dev);
4875 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
4876 	u32 tmp;
4877 
4878 	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
4879 	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
4880 	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
4881 
4882 	if (!transcoder_is_dsi(cpu_transcoder)) {
4883 		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
4884 		pipe_config->hw.adjusted_mode.crtc_hblank_start =
4885 							(tmp & 0xffff) + 1;
4886 		pipe_config->hw.adjusted_mode.crtc_hblank_end =
4887 						((tmp >> 16) & 0xffff) + 1;
4888 	}
4889 	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
4890 	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
4891 	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
4892 
4893 	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
4894 	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
4895 	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
4896 
4897 	if (!transcoder_is_dsi(cpu_transcoder)) {
4898 		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
4899 		pipe_config->hw.adjusted_mode.crtc_vblank_start =
4900 							(tmp & 0xffff) + 1;
4901 		pipe_config->hw.adjusted_mode.crtc_vblank_end =
4902 						((tmp >> 16) & 0xffff) + 1;
4903 	}
4904 	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
4905 	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
4906 	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
4907 
4908 	if (intel_pipe_is_interlaced(pipe_config)) {
4909 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
4910 		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
4911 		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
4912 	}
4913 }
4914 
4915 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
4916 				    struct intel_crtc_state *pipe_config)
4917 {
4918 	struct drm_device *dev = crtc->base.dev;
4919 	struct drm_i915_private *dev_priv = to_i915(dev);
4920 	u32 tmp;
4921 
4922 	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
4923 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4924 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
4925 }
4926 
4927 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
4928 {
4929 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4930 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4931 	u32 pipeconf;
4932 
4933 	pipeconf = 0;
4934 
4935 	/* we keep both pipes enabled on 830 */
4936 	if (IS_I830(dev_priv))
4937 		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
4938 
4939 	if (crtc_state->double_wide)
4940 		pipeconf |= PIPECONF_DOUBLE_WIDE;
4941 
4942 	/* only g4x and later have fancy bpc/dither controls */
4943 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4944 	    IS_CHERRYVIEW(dev_priv)) {
4945 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
4946 		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
4947 			pipeconf |= PIPECONF_DITHER_EN |
4948 				    PIPECONF_DITHER_TYPE_SP;
4949 
4950 		switch (crtc_state->pipe_bpp) {
4951 		case 18:
4952 			pipeconf |= PIPECONF_6BPC;
4953 			break;
4954 		case 24:
4955 			pipeconf |= PIPECONF_8BPC;
4956 			break;
4957 		case 30:
4958 			pipeconf |= PIPECONF_10BPC;
4959 			break;
4960 		default:
4961 			/* Case prevented by intel_choose_pipe_bpp_dither. */
4962 			BUG();
4963 		}
4964 	}
4965 
4966 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
4967 		if (DISPLAY_VER(dev_priv) < 4 ||
4968 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4969 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4970 		else
4971 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
4972 	} else {
4973 		pipeconf |= PIPECONF_PROGRESSIVE;
4974 	}
4975 
4976 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
4977 	     crtc_state->limited_color_range)
4978 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
4979 
4980 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
4981 
4982 	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
4983 
4984 	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
4985 	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
4986 }
4987 
4988 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
4989 {
4990 	if (IS_I830(dev_priv))
4991 		return false;
4992 
4993 	return DISPLAY_VER(dev_priv) >= 4 ||
4994 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
4995 }
4996 
4997 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
4998 {
4999 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5000 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5001 	u32 tmp;
5002 
5003 	if (!i9xx_has_pfit(dev_priv))
5004 		return;
5005 
5006 	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
5007 	if (!(tmp & PFIT_ENABLE))
5008 		return;
5009 
5010 	/* Check whether the pfit is attached to our pipe. */
5011 	if (DISPLAY_VER(dev_priv) < 4) {
5012 		if (crtc->pipe != PIPE_B)
5013 			return;
5014 	} else {
5015 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
5016 			return;
5017 	}
5018 
5019 	crtc_state->gmch_pfit.control = tmp;
5020 	crtc_state->gmch_pfit.pgm_ratios =
5021 		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
5022 }
5023 
5024 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5025 			       struct intel_crtc_state *pipe_config)
5026 {
5027 	struct drm_device *dev = crtc->base.dev;
5028 	struct drm_i915_private *dev_priv = to_i915(dev);
5029 	enum pipe pipe = crtc->pipe;
5030 	struct dpll clock;
5031 	u32 mdiv;
5032 	int refclk = 100000;
5033 
5034 	/* In case of DSI, DPLL will not be used */
5035 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
5036 		return;
5037 
5038 	vlv_dpio_get(dev_priv);
5039 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
5040 	vlv_dpio_put(dev_priv);
5041 
5042 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5043 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
5044 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5045 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5046 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5047 
5048 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
5049 }
5050 
5051 static void chv_crtc_clock_get(struct intel_crtc *crtc,
5052 			       struct intel_crtc_state *pipe_config)
5053 {
5054 	struct drm_device *dev = crtc->base.dev;
5055 	struct drm_i915_private *dev_priv = to_i915(dev);
5056 	enum pipe pipe = crtc->pipe;
5057 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
5058 	struct dpll clock;
5059 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
5060 	int refclk = 100000;
5061 
5062 	/* In case of DSI, DPLL will not be used */
5063 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
5064 		return;
5065 
5066 	vlv_dpio_get(dev_priv);
5067 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
5068 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
5069 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
5070 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
5071 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
5072 	vlv_dpio_put(dev_priv);
5073 
5074 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
5075 	clock.m2 = (pll_dw0 & 0xff) << 22;
5076 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
5077 		clock.m2 |= pll_dw2 & 0x3fffff;
5078 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
5079 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
5080 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
5081 
5082 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
5083 }
5084 
5085 static enum intel_output_format
5086 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
5087 {
5088 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5089 	u32 tmp;
5090 
5091 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5092 
5093 	if (tmp & PIPEMISC_YUV420_ENABLE) {
5094 		/* We support 4:2:0 in full blend mode only */
5095 		drm_WARN_ON(&dev_priv->drm,
5096 			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
5097 
5098 		return INTEL_OUTPUT_FORMAT_YCBCR420;
5099 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
5100 		return INTEL_OUTPUT_FORMAT_YCBCR444;
5101 	} else {
5102 		return INTEL_OUTPUT_FORMAT_RGB;
5103 	}
5104 }
5105 
5106 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
5107 {
5108 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5109 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
5110 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5111 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
5112 	u32 tmp;
5113 
5114 	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
5115 
5116 	if (tmp & DISPPLANE_GAMMA_ENABLE)
5117 		crtc_state->gamma_enable = true;
5118 
5119 	if (!HAS_GMCH(dev_priv) &&
5120 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
5121 		crtc_state->csc_enable = true;
5122 }
5123 
5124 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5125 				 struct intel_crtc_state *pipe_config)
5126 {
5127 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5128 	enum intel_display_power_domain power_domain;
5129 	intel_wakeref_t wakeref;
5130 	u32 tmp;
5131 	bool ret;
5132 
5133 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5134 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5135 	if (!wakeref)
5136 		return false;
5137 
5138 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5139 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5140 	pipe_config->shared_dpll = NULL;
5141 
5142 	ret = false;
5143 
5144 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5145 	if (!(tmp & PIPECONF_ENABLE))
5146 		goto out;
5147 
5148 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5149 	    IS_CHERRYVIEW(dev_priv)) {
5150 		switch (tmp & PIPECONF_BPC_MASK) {
5151 		case PIPECONF_6BPC:
5152 			pipe_config->pipe_bpp = 18;
5153 			break;
5154 		case PIPECONF_8BPC:
5155 			pipe_config->pipe_bpp = 24;
5156 			break;
5157 		case PIPECONF_10BPC:
5158 			pipe_config->pipe_bpp = 30;
5159 			break;
5160 		default:
5161 			break;
5162 		}
5163 	}
5164 
5165 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
5166 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
5167 		pipe_config->limited_color_range = true;
5168 
5169 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
5170 		PIPECONF_GAMMA_MODE_SHIFT;
5171 
5172 	if (IS_CHERRYVIEW(dev_priv))
5173 		pipe_config->cgm_mode = intel_de_read(dev_priv,
5174 						      CGM_PIPE_MODE(crtc->pipe));
5175 
5176 	i9xx_get_pipe_color_config(pipe_config);
5177 	intel_color_get_config(pipe_config);
5178 
5179 	if (DISPLAY_VER(dev_priv) < 4)
5180 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5181 
5182 	intel_get_transcoder_timings(crtc, pipe_config);
5183 	intel_get_pipe_src_size(crtc, pipe_config);
5184 
5185 	i9xx_get_pfit_config(pipe_config);
5186 
5187 	if (DISPLAY_VER(dev_priv) >= 4) {
5188 		/* No way to read it out on pipes B and C */
5189 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
5190 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
5191 		else
5192 			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
5193 		pipe_config->pixel_multiplier =
5194 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
5195 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
5196 		pipe_config->dpll_hw_state.dpll_md = tmp;
5197 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5198 		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
5199 		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
5200 		pipe_config->pixel_multiplier =
5201 			((tmp & SDVO_MULTIPLIER_MASK)
5202 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
5203 	} else {
5204 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
5205 		 * port and will be fixed up in the encoder->get_config
5206 		 * function. */
5207 		pipe_config->pixel_multiplier = 1;
5208 	}
5209 	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
5210 							DPLL(crtc->pipe));
5211 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
5212 		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
5213 							       FP0(crtc->pipe));
5214 		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
5215 							       FP1(crtc->pipe));
5216 	} else {
5217 		/* Mask out read-only status bits. */
5218 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5219 						     DPLL_PORTC_READY_MASK |
5220 						     DPLL_PORTB_READY_MASK);
5221 	}
5222 
5223 	if (IS_CHERRYVIEW(dev_priv))
5224 		chv_crtc_clock_get(crtc, pipe_config);
5225 	else if (IS_VALLEYVIEW(dev_priv))
5226 		vlv_crtc_clock_get(crtc, pipe_config);
5227 	else
5228 		i9xx_crtc_clock_get(crtc, pipe_config);
5229 
5230 	/*
5231 	 * Normally the dotclock is filled in by the encoder .get_config()
5232 	 * but in case the pipe is enabled w/o any ports we need a sane
5233 	 * default.
5234 	 */
5235 	pipe_config->hw.adjusted_mode.crtc_clock =
5236 		pipe_config->port_clock / pipe_config->pixel_multiplier;
5237 
5238 	ret = true;
5239 
5240 out:
5241 	intel_display_power_put(dev_priv, power_domain, wakeref);
5242 
5243 	return ret;
5244 }
5245 
5246 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
5247 {
5248 	struct intel_encoder *encoder;
5249 	int i;
5250 	u32 val, final;
5251 	bool has_lvds = false;
5252 	bool has_cpu_edp = false;
5253 	bool has_panel = false;
5254 	bool has_ck505 = false;
5255 	bool can_ssc = false;
5256 	bool using_ssc_source = false;
5257 
5258 	/* We need to take the global config into account */
5259 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5260 		switch (encoder->type) {
5261 		case INTEL_OUTPUT_LVDS:
5262 			has_panel = true;
5263 			has_lvds = true;
5264 			break;
5265 		case INTEL_OUTPUT_EDP:
5266 			has_panel = true;
5267 			if (encoder->port == PORT_A)
5268 				has_cpu_edp = true;
5269 			break;
5270 		default:
5271 			break;
5272 		}
5273 	}
5274 
5275 	if (HAS_PCH_IBX(dev_priv)) {
5276 		has_ck505 = dev_priv->vbt.display_clock_mode;
5277 		can_ssc = has_ck505;
5278 	} else {
5279 		has_ck505 = false;
5280 		can_ssc = true;
5281 	}
5282 
5283 	/* Check if any DPLLs are using the SSC source */
5284 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
5285 		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
5286 
5287 		if (!(temp & DPLL_VCO_ENABLE))
5288 			continue;
5289 
5290 		if ((temp & PLL_REF_INPUT_MASK) ==
5291 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5292 			using_ssc_source = true;
5293 			break;
5294 		}
5295 	}
5296 
5297 	drm_dbg_kms(&dev_priv->drm,
5298 		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
5299 		    has_panel, has_lvds, has_ck505, using_ssc_source);
5300 
5301 	/* Ironlake: try to setup display ref clock before DPLL
5302 	 * enabling. This is only under driver's control after
5303 	 * PCH B stepping, previous chipset stepping should be
5304 	 * ignoring this setting.
5305 	 */
5306 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
5307 
5308 	/* As we must carefully and slowly disable/enable each source in turn,
5309 	 * compute the final state we want first and check if we need to
5310 	 * make any changes at all.
5311 	 */
5312 	final = val;
5313 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
5314 	if (has_ck505)
5315 		final |= DREF_NONSPREAD_CK505_ENABLE;
5316 	else
5317 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
5318 
5319 	final &= ~DREF_SSC_SOURCE_MASK;
5320 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5321 	final &= ~DREF_SSC1_ENABLE;
5322 
5323 	if (has_panel) {
5324 		final |= DREF_SSC_SOURCE_ENABLE;
5325 
5326 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
5327 			final |= DREF_SSC1_ENABLE;
5328 
5329 		if (has_cpu_edp) {
5330 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
5331 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5332 			else
5333 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5334 		} else
5335 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5336 	} else if (using_ssc_source) {
5337 		final |= DREF_SSC_SOURCE_ENABLE;
5338 		final |= DREF_SSC1_ENABLE;
5339 	}
5340 
5341 	if (final == val)
5342 		return;
5343 
5344 	/* Always enable nonspread source */
5345 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
5346 
5347 	if (has_ck505)
5348 		val |= DREF_NONSPREAD_CK505_ENABLE;
5349 	else
5350 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
5351 
5352 	if (has_panel) {
5353 		val &= ~DREF_SSC_SOURCE_MASK;
5354 		val |= DREF_SSC_SOURCE_ENABLE;
5355 
5356 		/* SSC must be turned on before enabling the CPU output  */
5357 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5358 			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
5359 			val |= DREF_SSC1_ENABLE;
5360 		} else
5361 			val &= ~DREF_SSC1_ENABLE;
5362 
5363 		/* Get SSC going before enabling the outputs */
5364 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5365 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5366 		udelay(200);
5367 
5368 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5369 
5370 		/* Enable CPU source on CPU attached eDP */
5371 		if (has_cpu_edp) {
5372 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5373 				drm_dbg_kms(&dev_priv->drm,
5374 					    "Using SSC on eDP\n");
5375 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5376 			} else
5377 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5378 		} else
5379 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5380 
5381 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5382 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5383 		udelay(200);
5384 	} else {
5385 		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
5386 
5387 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5388 
5389 		/* Turn off CPU output */
5390 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5391 
5392 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5393 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5394 		udelay(200);
5395 
5396 		if (!using_ssc_source) {
5397 			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
5398 
5399 			/* Turn off the SSC source */
5400 			val &= ~DREF_SSC_SOURCE_MASK;
5401 			val |= DREF_SSC_SOURCE_DISABLE;
5402 
5403 			/* Turn off SSC1 */
5404 			val &= ~DREF_SSC1_ENABLE;
5405 
5406 			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5407 			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5408 			udelay(200);
5409 		}
5410 	}
5411 
5412 	BUG_ON(val != final);
5413 }
5414 
5415 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5416 {
5417 	u32 tmp;
5418 
5419 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5420 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5421 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5422 
5423 	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5424 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5425 		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
5426 
5427 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5428 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5429 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5430 
5431 	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5432 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5433 		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
5434 }
5435 
5436 /* WaMPhyProgramming:hsw */
5437 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5438 {
5439 	u32 tmp;
5440 
5441 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5442 	tmp &= ~(0xFF << 24);
5443 	tmp |= (0x12 << 24);
5444 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5445 
5446 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5447 	tmp |= (1 << 11);
5448 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5449 
5450 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5451 	tmp |= (1 << 11);
5452 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5453 
5454 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5455 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5456 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5457 
5458 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5459 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5460 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5461 
5462 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5463 	tmp &= ~(7 << 13);
5464 	tmp |= (5 << 13);
5465 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5466 
5467 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5468 	tmp &= ~(7 << 13);
5469 	tmp |= (5 << 13);
5470 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5471 
5472 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5473 	tmp &= ~0xFF;
5474 	tmp |= 0x1C;
5475 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5476 
5477 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5478 	tmp &= ~0xFF;
5479 	tmp |= 0x1C;
5480 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5481 
5482 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5483 	tmp &= ~(0xFF << 16);
5484 	tmp |= (0x1C << 16);
5485 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5486 
5487 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5488 	tmp &= ~(0xFF << 16);
5489 	tmp |= (0x1C << 16);
5490 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5491 
5492 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5493 	tmp |= (1 << 27);
5494 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5495 
5496 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5497 	tmp |= (1 << 27);
5498 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5499 
5500 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5501 	tmp &= ~(0xF << 28);
5502 	tmp |= (4 << 28);
5503 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5504 
5505 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5506 	tmp &= ~(0xF << 28);
5507 	tmp |= (4 << 28);
5508 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5509 }
5510 
5511 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5512  * Programming" based on the parameters passed:
5513  * - Sequence to enable CLKOUT_DP
5514  * - Sequence to enable CLKOUT_DP without spread
5515  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5516  */
5517 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
5518 				 bool with_spread, bool with_fdi)
5519 {
5520 	u32 reg, tmp;
5521 
5522 	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
5523 		     "FDI requires downspread\n"))
5524 		with_spread = true;
5525 	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
5526 		     with_fdi, "LP PCH doesn't have FDI\n"))
5527 		with_fdi = false;
5528 
5529 	mutex_lock(&dev_priv->sb_lock);
5530 
5531 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5532 	tmp &= ~SBI_SSCCTL_DISABLE;
5533 	tmp |= SBI_SSCCTL_PATHALT;
5534 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5535 
5536 	udelay(24);
5537 
5538 	if (with_spread) {
5539 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5540 		tmp &= ~SBI_SSCCTL_PATHALT;
5541 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5542 
5543 		if (with_fdi) {
5544 			lpt_reset_fdi_mphy(dev_priv);
5545 			lpt_program_fdi_mphy(dev_priv);
5546 		}
5547 	}
5548 
5549 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5550 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5551 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5552 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5553 
5554 	mutex_unlock(&dev_priv->sb_lock);
5555 }
5556 
5557 /* Sequence to disable CLKOUT_DP */
5558 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
5559 {
5560 	u32 reg, tmp;
5561 
5562 	mutex_lock(&dev_priv->sb_lock);
5563 
5564 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5565 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5566 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5567 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5568 
5569 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5570 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
5571 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
5572 			tmp |= SBI_SSCCTL_PATHALT;
5573 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5574 			udelay(32);
5575 		}
5576 		tmp |= SBI_SSCCTL_DISABLE;
5577 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5578 	}
5579 
5580 	mutex_unlock(&dev_priv->sb_lock);
5581 }
5582 
5583 #define BEND_IDX(steps) ((50 + (steps)) / 5)
5584 
5585 static const u16 sscdivintphase[] = {
5586 	[BEND_IDX( 50)] = 0x3B23,
5587 	[BEND_IDX( 45)] = 0x3B23,
5588 	[BEND_IDX( 40)] = 0x3C23,
5589 	[BEND_IDX( 35)] = 0x3C23,
5590 	[BEND_IDX( 30)] = 0x3D23,
5591 	[BEND_IDX( 25)] = 0x3D23,
5592 	[BEND_IDX( 20)] = 0x3E23,
5593 	[BEND_IDX( 15)] = 0x3E23,
5594 	[BEND_IDX( 10)] = 0x3F23,
5595 	[BEND_IDX(  5)] = 0x3F23,
5596 	[BEND_IDX(  0)] = 0x0025,
5597 	[BEND_IDX( -5)] = 0x0025,
5598 	[BEND_IDX(-10)] = 0x0125,
5599 	[BEND_IDX(-15)] = 0x0125,
5600 	[BEND_IDX(-20)] = 0x0225,
5601 	[BEND_IDX(-25)] = 0x0225,
5602 	[BEND_IDX(-30)] = 0x0325,
5603 	[BEND_IDX(-35)] = 0x0325,
5604 	[BEND_IDX(-40)] = 0x0425,
5605 	[BEND_IDX(-45)] = 0x0425,
5606 	[BEND_IDX(-50)] = 0x0525,
5607 };
5608 
5609 /*
5610  * Bend CLKOUT_DP
5611  * steps -50 to 50 inclusive, in steps of 5
5612  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5613  * change in clock period = -(steps / 10) * 5.787 ps
5614  */
5615 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5616 {
5617 	u32 tmp;
5618 	int idx = BEND_IDX(steps);
5619 
5620 	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
5621 		return;
5622 
5623 	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
5624 		return;
5625 
5626 	mutex_lock(&dev_priv->sb_lock);
5627 
5628 	if (steps % 10 != 0)
5629 		tmp = 0xAAAAAAAB;
5630 	else
5631 		tmp = 0x00000000;
5632 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
5633 
5634 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
5635 	tmp &= 0xffff0000;
5636 	tmp |= sscdivintphase[idx];
5637 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
5638 
5639 	mutex_unlock(&dev_priv->sb_lock);
5640 }
5641 
5642 #undef BEND_IDX
5643 
5644 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5645 {
5646 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5647 	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5648 
5649 	if ((ctl & SPLL_PLL_ENABLE) == 0)
5650 		return false;
5651 
5652 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
5653 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5654 		return true;
5655 
5656 	if (IS_BROADWELL(dev_priv) &&
5657 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
5658 		return true;
5659 
5660 	return false;
5661 }
5662 
5663 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
5664 			       enum intel_dpll_id id)
5665 {
5666 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5667 	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
5668 
5669 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
5670 		return false;
5671 
5672 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
5673 		return true;
5674 
5675 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
5676 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
5677 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5678 		return true;
5679 
5680 	return false;
5681 }
5682 
5683 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
5684 {
5685 	struct intel_encoder *encoder;
5686 	bool has_fdi = false;
5687 
5688 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5689 		switch (encoder->type) {
5690 		case INTEL_OUTPUT_ANALOG:
5691 			has_fdi = true;
5692 			break;
5693 		default:
5694 			break;
5695 		}
5696 	}
5697 
5698 	/*
5699 	 * The BIOS may have decided to use the PCH SSC
5700 	 * reference so we must not disable it until the
5701 	 * relevant PLLs have stopped relying on it. We'll
5702 	 * just leave the PCH SSC reference enabled in case
5703 	 * any active PLL is using it. It will get disabled
5704 	 * after runtime suspend if we don't have FDI.
5705 	 *
5706 	 * TODO: Move the whole reference clock handling
5707 	 * to the modeset sequence proper so that we can
5708 	 * actually enable/disable/reconfigure these things
5709 	 * safely. To do that we need to introduce a real
5710 	 * clock hierarchy. That would also allow us to do
5711 	 * clock bending finally.
5712 	 */
5713 	dev_priv->pch_ssc_use = 0;
5714 
5715 	if (spll_uses_pch_ssc(dev_priv)) {
5716 		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
5717 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
5718 	}
5719 
5720 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
5721 		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
5722 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
5723 	}
5724 
5725 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
5726 		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
5727 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
5728 	}
5729 
5730 	if (dev_priv->pch_ssc_use)
5731 		return;
5732 
5733 	if (has_fdi) {
5734 		lpt_bend_clkout_dp(dev_priv, 0);
5735 		lpt_enable_clkout_dp(dev_priv, true, true);
5736 	} else {
5737 		lpt_disable_clkout_dp(dev_priv);
5738 	}
5739 }
5740 
5741 /*
5742  * Initialize reference clocks when the driver loads
5743  */
5744 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
5745 {
5746 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
5747 		ilk_init_pch_refclk(dev_priv);
5748 	else if (HAS_PCH_LPT(dev_priv))
5749 		lpt_init_pch_refclk(dev_priv);
5750 }
5751 
5752 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
5753 {
5754 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5755 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5756 	enum pipe pipe = crtc->pipe;
5757 	u32 val;
5758 
5759 	val = 0;
5760 
5761 	switch (crtc_state->pipe_bpp) {
5762 	case 18:
5763 		val |= PIPECONF_6BPC;
5764 		break;
5765 	case 24:
5766 		val |= PIPECONF_8BPC;
5767 		break;
5768 	case 30:
5769 		val |= PIPECONF_10BPC;
5770 		break;
5771 	case 36:
5772 		val |= PIPECONF_12BPC;
5773 		break;
5774 	default:
5775 		/* Case prevented by intel_choose_pipe_bpp_dither. */
5776 		BUG();
5777 	}
5778 
5779 	if (crtc_state->dither)
5780 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5781 
5782 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5783 		val |= PIPECONF_INTERLACED_ILK;
5784 	else
5785 		val |= PIPECONF_PROGRESSIVE;
5786 
5787 	/*
5788 	 * This would end up with an odd purple hue over
5789 	 * the entire display. Make sure we don't do it.
5790 	 */
5791 	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
5792 		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
5793 
5794 	if (crtc_state->limited_color_range &&
5795 	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5796 		val |= PIPECONF_COLOR_RANGE_SELECT;
5797 
5798 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5799 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
5800 
5801 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
5802 
5803 	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
5804 
5805 	intel_de_write(dev_priv, PIPECONF(pipe), val);
5806 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
5807 }
5808 
5809 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
5810 {
5811 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5812 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5813 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5814 	u32 val = 0;
5815 
5816 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
5817 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5818 
5819 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5820 		val |= PIPECONF_INTERLACED_ILK;
5821 	else
5822 		val |= PIPECONF_PROGRESSIVE;
5823 
5824 	if (IS_HASWELL(dev_priv) &&
5825 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5826 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
5827 
5828 	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
5829 	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
5830 }
5831 
5832 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
5833 {
5834 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5835 	const struct intel_crtc_scaler_state *scaler_state =
5836 		&crtc_state->scaler_state;
5837 
5838 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5839 	u32 val = 0;
5840 	int i;
5841 
5842 	switch (crtc_state->pipe_bpp) {
5843 	case 18:
5844 		val |= PIPEMISC_DITHER_6_BPC;
5845 		break;
5846 	case 24:
5847 		val |= PIPEMISC_DITHER_8_BPC;
5848 		break;
5849 	case 30:
5850 		val |= PIPEMISC_DITHER_10_BPC;
5851 		break;
5852 	case 36:
5853 		val |= PIPEMISC_DITHER_12_BPC;
5854 		break;
5855 	default:
5856 		MISSING_CASE(crtc_state->pipe_bpp);
5857 		break;
5858 	}
5859 
5860 	if (crtc_state->dither)
5861 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
5862 
5863 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
5864 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
5865 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
5866 
5867 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5868 		val |= PIPEMISC_YUV420_ENABLE |
5869 			PIPEMISC_YUV420_MODE_FULL_BLEND;
5870 
5871 	if (DISPLAY_VER(dev_priv) >= 11 &&
5872 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
5873 					   BIT(PLANE_CURSOR))) == 0)
5874 		val |= PIPEMISC_HDR_MODE_PRECISION;
5875 
5876 	if (DISPLAY_VER(dev_priv) >= 12)
5877 		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
5878 
5879 	if (IS_ALDERLAKE_P(dev_priv)) {
5880 		bool scaler_in_use = false;
5881 
5882 		for (i = 0; i < crtc->num_scalers; i++) {
5883 			if (!scaler_state->scalers[i].in_use)
5884 				continue;
5885 
5886 			scaler_in_use = true;
5887 			break;
5888 		}
5889 
5890 		intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
5891 			     PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
5892 			     scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
5893 			     PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
5894 	}
5895 
5896 	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
5897 }
5898 
5899 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
5900 {
5901 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5902 	u32 tmp;
5903 
5904 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5905 
5906 	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
5907 	case PIPEMISC_DITHER_6_BPC:
5908 		return 18;
5909 	case PIPEMISC_DITHER_8_BPC:
5910 		return 24;
5911 	case PIPEMISC_DITHER_10_BPC:
5912 		return 30;
5913 	case PIPEMISC_DITHER_12_BPC:
5914 		return 36;
5915 	default:
5916 		MISSING_CASE(tmp);
5917 		return 0;
5918 	}
5919 }
5920 
5921 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
5922 {
5923 	/*
5924 	 * Account for spread spectrum to avoid
5925 	 * oversubscribing the link. Max center spread
5926 	 * is 2.5%; use 5% for safety's sake.
5927 	 */
5928 	u32 bps = target_clock * bpp * 21 / 20;
5929 	return DIV_ROUND_UP(bps, link_bw * 8);
5930 }
5931 
5932 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5933 					 struct intel_link_m_n *m_n)
5934 {
5935 	struct drm_device *dev = crtc->base.dev;
5936 	struct drm_i915_private *dev_priv = to_i915(dev);
5937 	enum pipe pipe = crtc->pipe;
5938 
5939 	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
5940 	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
5941 	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5942 		& ~TU_SIZE_MASK;
5943 	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
5944 	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5945 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5946 }
5947 
5948 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
5949 					 enum transcoder transcoder,
5950 					 struct intel_link_m_n *m_n,
5951 					 struct intel_link_m_n *m2_n2)
5952 {
5953 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5954 	enum pipe pipe = crtc->pipe;
5955 
5956 	if (DISPLAY_VER(dev_priv) >= 5) {
5957 		m_n->link_m = intel_de_read(dev_priv,
5958 					    PIPE_LINK_M1(transcoder));
5959 		m_n->link_n = intel_de_read(dev_priv,
5960 					    PIPE_LINK_N1(transcoder));
5961 		m_n->gmch_m = intel_de_read(dev_priv,
5962 					    PIPE_DATA_M1(transcoder))
5963 			& ~TU_SIZE_MASK;
5964 		m_n->gmch_n = intel_de_read(dev_priv,
5965 					    PIPE_DATA_N1(transcoder));
5966 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
5967 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5968 
5969 		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
5970 			m2_n2->link_m = intel_de_read(dev_priv,
5971 						      PIPE_LINK_M2(transcoder));
5972 			m2_n2->link_n =	intel_de_read(dev_priv,
5973 							     PIPE_LINK_N2(transcoder));
5974 			m2_n2->gmch_m =	intel_de_read(dev_priv,
5975 							     PIPE_DATA_M2(transcoder))
5976 					& ~TU_SIZE_MASK;
5977 			m2_n2->gmch_n =	intel_de_read(dev_priv,
5978 							     PIPE_DATA_N2(transcoder));
5979 			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
5980 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5981 		}
5982 	} else {
5983 		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
5984 		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
5985 		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5986 			& ~TU_SIZE_MASK;
5987 		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
5988 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5989 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5990 	}
5991 }
5992 
5993 void intel_dp_get_m_n(struct intel_crtc *crtc,
5994 		      struct intel_crtc_state *pipe_config)
5995 {
5996 	if (pipe_config->has_pch_encoder)
5997 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5998 	else
5999 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6000 					     &pipe_config->dp_m_n,
6001 					     &pipe_config->dp_m2_n2);
6002 }
6003 
6004 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
6005 				   struct intel_crtc_state *pipe_config)
6006 {
6007 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6008 				     &pipe_config->fdi_m_n, NULL);
6009 }
6010 
6011 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
6012 				  u32 pos, u32 size)
6013 {
6014 	drm_rect_init(&crtc_state->pch_pfit.dst,
6015 		      pos >> 16, pos & 0xffff,
6016 		      size >> 16, size & 0xffff);
6017 }
6018 
6019 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
6020 {
6021 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6022 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6023 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
6024 	int id = -1;
6025 	int i;
6026 
6027 	/* find scaler attached to this pipe */
6028 	for (i = 0; i < crtc->num_scalers; i++) {
6029 		u32 ctl, pos, size;
6030 
6031 		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
6032 		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
6033 			continue;
6034 
6035 		id = i;
6036 		crtc_state->pch_pfit.enabled = true;
6037 
6038 		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
6039 		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
6040 
6041 		ilk_get_pfit_pos_size(crtc_state, pos, size);
6042 
6043 		scaler_state->scalers[i].in_use = true;
6044 		break;
6045 	}
6046 
6047 	scaler_state->scaler_id = id;
6048 	if (id >= 0)
6049 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
6050 	else
6051 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
6052 }
6053 
6054 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
6055 {
6056 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6057 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6058 	u32 ctl, pos, size;
6059 
6060 	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
6061 	if ((ctl & PF_ENABLE) == 0)
6062 		return;
6063 
6064 	crtc_state->pch_pfit.enabled = true;
6065 
6066 	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
6067 	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
6068 
6069 	ilk_get_pfit_pos_size(crtc_state, pos, size);
6070 
6071 	/*
6072 	 * We currently do not free assignements of panel fitters on
6073 	 * ivb/hsw (since we don't use the higher upscaling modes which
6074 	 * differentiates them) so just WARN about this case for now.
6075 	 */
6076 	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
6077 		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
6078 }
6079 
6080 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
6081 				struct intel_crtc_state *pipe_config)
6082 {
6083 	struct drm_device *dev = crtc->base.dev;
6084 	struct drm_i915_private *dev_priv = to_i915(dev);
6085 	enum intel_display_power_domain power_domain;
6086 	intel_wakeref_t wakeref;
6087 	u32 tmp;
6088 	bool ret;
6089 
6090 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
6091 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
6092 	if (!wakeref)
6093 		return false;
6094 
6095 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6096 	pipe_config->shared_dpll = NULL;
6097 
6098 	ret = false;
6099 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
6100 	if (!(tmp & PIPECONF_ENABLE))
6101 		goto out;
6102 
6103 	switch (tmp & PIPECONF_BPC_MASK) {
6104 	case PIPECONF_6BPC:
6105 		pipe_config->pipe_bpp = 18;
6106 		break;
6107 	case PIPECONF_8BPC:
6108 		pipe_config->pipe_bpp = 24;
6109 		break;
6110 	case PIPECONF_10BPC:
6111 		pipe_config->pipe_bpp = 30;
6112 		break;
6113 	case PIPECONF_12BPC:
6114 		pipe_config->pipe_bpp = 36;
6115 		break;
6116 	default:
6117 		break;
6118 	}
6119 
6120 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
6121 		pipe_config->limited_color_range = true;
6122 
6123 	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
6124 	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
6125 	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
6126 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6127 		break;
6128 	default:
6129 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6130 		break;
6131 	}
6132 
6133 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
6134 		PIPECONF_GAMMA_MODE_SHIFT;
6135 
6136 	pipe_config->csc_mode = intel_de_read(dev_priv,
6137 					      PIPE_CSC_MODE(crtc->pipe));
6138 
6139 	i9xx_get_pipe_color_config(pipe_config);
6140 	intel_color_get_config(pipe_config);
6141 
6142 	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
6143 		struct intel_shared_dpll *pll;
6144 		enum intel_dpll_id pll_id;
6145 		bool pll_active;
6146 
6147 		pipe_config->has_pch_encoder = true;
6148 
6149 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
6150 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6151 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
6152 
6153 		ilk_get_fdi_m_n_config(crtc, pipe_config);
6154 
6155 		if (HAS_PCH_IBX(dev_priv)) {
6156 			/*
6157 			 * The pipe->pch transcoder and pch transcoder->pll
6158 			 * mapping is fixed.
6159 			 */
6160 			pll_id = (enum intel_dpll_id) crtc->pipe;
6161 		} else {
6162 			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
6163 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
6164 				pll_id = DPLL_ID_PCH_PLL_B;
6165 			else
6166 				pll_id= DPLL_ID_PCH_PLL_A;
6167 		}
6168 
6169 		pipe_config->shared_dpll =
6170 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
6171 		pll = pipe_config->shared_dpll;
6172 
6173 		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6174 						     &pipe_config->dpll_hw_state);
6175 		drm_WARN_ON(dev, !pll_active);
6176 
6177 		tmp = pipe_config->dpll_hw_state.dpll;
6178 		pipe_config->pixel_multiplier =
6179 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
6180 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6181 
6182 		ilk_pch_clock_get(crtc, pipe_config);
6183 	} else {
6184 		pipe_config->pixel_multiplier = 1;
6185 	}
6186 
6187 	intel_get_transcoder_timings(crtc, pipe_config);
6188 	intel_get_pipe_src_size(crtc, pipe_config);
6189 
6190 	ilk_get_pfit_config(pipe_config);
6191 
6192 	ret = true;
6193 
6194 out:
6195 	intel_display_power_put(dev_priv, power_domain, wakeref);
6196 
6197 	return ret;
6198 }
6199 
6200 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
6201 				     struct intel_crtc_state *pipe_config,
6202 				     struct intel_display_power_domain_set *power_domain_set)
6203 {
6204 	struct drm_device *dev = crtc->base.dev;
6205 	struct drm_i915_private *dev_priv = to_i915(dev);
6206 	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
6207 	unsigned long enabled_panel_transcoders = 0;
6208 	enum transcoder panel_transcoder;
6209 	u32 tmp;
6210 
6211 	if (DISPLAY_VER(dev_priv) >= 11)
6212 		panel_transcoder_mask |=
6213 			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
6214 
6215 	/*
6216 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
6217 	 * and DSI transcoders handled below.
6218 	 */
6219 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6220 
6221 	/*
6222 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
6223 	 * consistency and less surprising code; it's in always on power).
6224 	 */
6225 	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
6226 				       panel_transcoder_mask) {
6227 		bool force_thru = false;
6228 		enum pipe trans_pipe;
6229 
6230 		tmp = intel_de_read(dev_priv,
6231 				    TRANS_DDI_FUNC_CTL(panel_transcoder));
6232 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6233 			continue;
6234 
6235 		/*
6236 		 * Log all enabled ones, only use the first one.
6237 		 *
6238 		 * FIXME: This won't work for two separate DSI displays.
6239 		 */
6240 		enabled_panel_transcoders |= BIT(panel_transcoder);
6241 		if (enabled_panel_transcoders != BIT(panel_transcoder))
6242 			continue;
6243 
6244 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
6245 		default:
6246 			drm_WARN(dev, 1,
6247 				 "unknown pipe linked to transcoder %s\n",
6248 				 transcoder_name(panel_transcoder));
6249 			fallthrough;
6250 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
6251 			force_thru = true;
6252 			fallthrough;
6253 		case TRANS_DDI_EDP_INPUT_A_ON:
6254 			trans_pipe = PIPE_A;
6255 			break;
6256 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
6257 			trans_pipe = PIPE_B;
6258 			break;
6259 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
6260 			trans_pipe = PIPE_C;
6261 			break;
6262 		case TRANS_DDI_EDP_INPUT_D_ONOFF:
6263 			trans_pipe = PIPE_D;
6264 			break;
6265 		}
6266 
6267 		if (trans_pipe == crtc->pipe) {
6268 			pipe_config->cpu_transcoder = panel_transcoder;
6269 			pipe_config->pch_pfit.force_thru = force_thru;
6270 		}
6271 	}
6272 
6273 	/*
6274 	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
6275 	 */
6276 	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
6277 		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));
6278 
6279 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6280 						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
6281 		return false;
6282 
6283 	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
6284 
6285 	return tmp & PIPECONF_ENABLE;
6286 }
6287 
6288 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
6289 					 struct intel_crtc_state *pipe_config,
6290 					 struct intel_display_power_domain_set *power_domain_set)
6291 {
6292 	struct drm_device *dev = crtc->base.dev;
6293 	struct drm_i915_private *dev_priv = to_i915(dev);
6294 	enum transcoder cpu_transcoder;
6295 	enum port port;
6296 	u32 tmp;
6297 
6298 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
6299 		if (port == PORT_A)
6300 			cpu_transcoder = TRANSCODER_DSI_A;
6301 		else
6302 			cpu_transcoder = TRANSCODER_DSI_C;
6303 
6304 		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6305 							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
6306 			continue;
6307 
6308 		/*
6309 		 * The PLL needs to be enabled with a valid divider
6310 		 * configuration, otherwise accessing DSI registers will hang
6311 		 * the machine. See BSpec North Display Engine
6312 		 * registers/MIPI[BXT]. We can break out here early, since we
6313 		 * need the same DSI PLL to be enabled for both DSI ports.
6314 		 */
6315 		if (!bxt_dsi_pll_is_enabled(dev_priv))
6316 			break;
6317 
6318 		/* XXX: this works for video mode only */
6319 		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
6320 		if (!(tmp & DPI_ENABLE))
6321 			continue;
6322 
6323 		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
6324 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
6325 			continue;
6326 
6327 		pipe_config->cpu_transcoder = cpu_transcoder;
6328 		break;
6329 	}
6330 
6331 	return transcoder_is_dsi(pipe_config->cpu_transcoder);
6332 }
6333 
6334 static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
6335 				   struct intel_crtc_state *pipe_config)
6336 {
6337 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6338 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6339 	enum port port;
6340 	u32 tmp;
6341 
6342 	if (transcoder_is_dsi(cpu_transcoder)) {
6343 		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
6344 						PORT_A : PORT_B;
6345 	} else {
6346 		tmp = intel_de_read(dev_priv,
6347 				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
6348 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6349 			return;
6350 		if (DISPLAY_VER(dev_priv) >= 12)
6351 			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6352 		else
6353 			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6354 	}
6355 
6356 	/*
6357 	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
6358 	 * DDI E. So just check whether this pipe is wired to DDI E and whether
6359 	 * the PCH transcoder is on.
6360 	 */
6361 	if (DISPLAY_VER(dev_priv) < 9 &&
6362 	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
6363 		pipe_config->has_pch_encoder = true;
6364 
6365 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
6366 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6367 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
6368 
6369 		ilk_get_fdi_m_n_config(crtc, pipe_config);
6370 	}
6371 }
6372 
6373 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
6374 				struct intel_crtc_state *pipe_config)
6375 {
6376 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6377 	struct intel_display_power_domain_set power_domain_set = { };
6378 	bool active;
6379 	u32 tmp;
6380 
6381 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6382 						       POWER_DOMAIN_PIPE(crtc->pipe)))
6383 		return false;
6384 
6385 	pipe_config->shared_dpll = NULL;
6386 
6387 	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
6388 
6389 	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
6390 	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
6391 		drm_WARN_ON(&dev_priv->drm, active);
6392 		active = true;
6393 	}
6394 
6395 	intel_dsc_get_config(pipe_config);
6396 	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
6397 		intel_uncompressed_joiner_get_config(pipe_config);
6398 
6399 	if (!active) {
6400 		/* bigjoiner slave doesn't enable transcoder */
6401 		if (!pipe_config->bigjoiner_slave)
6402 			goto out;
6403 
6404 		active = true;
6405 		pipe_config->pixel_multiplier = 1;
6406 
6407 		/* we cannot read out most state, so don't bother.. */
6408 		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
6409 	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
6410 	    DISPLAY_VER(dev_priv) >= 11) {
6411 		hsw_get_ddi_port_state(crtc, pipe_config);
6412 		intel_get_transcoder_timings(crtc, pipe_config);
6413 	}
6414 
6415 	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
6416 		intel_vrr_get_config(crtc, pipe_config);
6417 
6418 	intel_get_pipe_src_size(crtc, pipe_config);
6419 
6420 	if (IS_HASWELL(dev_priv)) {
6421 		u32 tmp = intel_de_read(dev_priv,
6422 					PIPECONF(pipe_config->cpu_transcoder));
6423 
6424 		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
6425 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6426 		else
6427 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6428 	} else {
6429 		pipe_config->output_format =
6430 			bdw_get_pipemisc_output_format(crtc);
6431 	}
6432 
6433 	pipe_config->gamma_mode = intel_de_read(dev_priv,
6434 						GAMMA_MODE(crtc->pipe));
6435 
6436 	pipe_config->csc_mode = intel_de_read(dev_priv,
6437 					      PIPE_CSC_MODE(crtc->pipe));
6438 
6439 	if (DISPLAY_VER(dev_priv) >= 9) {
6440 		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
6441 
6442 		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
6443 			pipe_config->gamma_enable = true;
6444 
6445 		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
6446 			pipe_config->csc_enable = true;
6447 	} else {
6448 		i9xx_get_pipe_color_config(pipe_config);
6449 	}
6450 
6451 	intel_color_get_config(pipe_config);
6452 
6453 	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
6454 	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
6455 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6456 		pipe_config->ips_linetime =
6457 			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
6458 
6459 	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6460 						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
6461 		if (DISPLAY_VER(dev_priv) >= 9)
6462 			skl_get_pfit_config(pipe_config);
6463 		else
6464 			ilk_get_pfit_config(pipe_config);
6465 	}
6466 
6467 	if (hsw_crtc_supports_ips(crtc)) {
6468 		if (IS_HASWELL(dev_priv))
6469 			pipe_config->ips_enabled = intel_de_read(dev_priv,
6470 								 IPS_CTL) & IPS_ENABLE;
6471 		else {
6472 			/*
6473 			 * We cannot readout IPS state on broadwell, set to
6474 			 * true so we can set it to a defined state on first
6475 			 * commit.
6476 			 */
6477 			pipe_config->ips_enabled = true;
6478 		}
6479 	}
6480 
6481 	if (pipe_config->bigjoiner_slave) {
6482 		/* Cannot be read out as a slave, set to 0. */
6483 		pipe_config->pixel_multiplier = 0;
6484 	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
6485 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
6486 		pipe_config->pixel_multiplier =
6487 			intel_de_read(dev_priv,
6488 				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
6489 	} else {
6490 		pipe_config->pixel_multiplier = 1;
6491 	}
6492 
6493 out:
6494 	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
6495 
6496 	return active;
6497 }
6498 
6499 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6500 {
6501 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6502 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6503 
6504 	if (!i915->display.get_pipe_config(crtc, crtc_state))
6505 		return false;
6506 
6507 	crtc_state->hw.active = true;
6508 
6509 	intel_crtc_readout_derived_state(crtc_state);
6510 
6511 	return true;
6512 }
6513 
6514 /* VESA 640x480x72Hz mode to set on the pipe */
6515 static const struct drm_display_mode load_detect_mode = {
6516 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6517 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6518 };
6519 
6520 struct drm_framebuffer *
6521 intel_framebuffer_create(struct drm_i915_gem_object *obj,
6522 			 struct drm_mode_fb_cmd2 *mode_cmd)
6523 {
6524 	struct intel_framebuffer *intel_fb;
6525 	int ret;
6526 
6527 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6528 	if (!intel_fb)
6529 		return ERR_PTR(-ENOMEM);
6530 
6531 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
6532 	if (ret)
6533 		goto err;
6534 
6535 	return &intel_fb->base;
6536 
6537 err:
6538 	kfree(intel_fb);
6539 	return ERR_PTR(ret);
6540 }
6541 
6542 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6543 					struct drm_crtc *crtc)
6544 {
6545 	struct drm_plane *plane;
6546 	struct drm_plane_state *plane_state;
6547 	int ret, i;
6548 
6549 	ret = drm_atomic_add_affected_planes(state, crtc);
6550 	if (ret)
6551 		return ret;
6552 
6553 	for_each_new_plane_in_state(state, plane, plane_state, i) {
6554 		if (plane_state->crtc != crtc)
6555 			continue;
6556 
6557 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6558 		if (ret)
6559 			return ret;
6560 
6561 		drm_atomic_set_fb_for_plane(plane_state, NULL);
6562 	}
6563 
6564 	return 0;
6565 }
6566 
6567 int intel_get_load_detect_pipe(struct drm_connector *connector,
6568 			       struct intel_load_detect_pipe *old,
6569 			       struct drm_modeset_acquire_ctx *ctx)
6570 {
6571 	struct intel_encoder *encoder =
6572 		intel_attached_encoder(to_intel_connector(connector));
6573 	struct intel_crtc *possible_crtc;
6574 	struct intel_crtc *crtc = NULL;
6575 	struct drm_device *dev = encoder->base.dev;
6576 	struct drm_i915_private *dev_priv = to_i915(dev);
6577 	struct drm_mode_config *config = &dev->mode_config;
6578 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
6579 	struct drm_connector_state *connector_state;
6580 	struct intel_crtc_state *crtc_state;
6581 	int ret;
6582 
6583 	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6584 		    connector->base.id, connector->name,
6585 		    encoder->base.base.id, encoder->base.name);
6586 
6587 	old->restore_state = NULL;
6588 
6589 	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
6590 
6591 	/*
6592 	 * Algorithm gets a little messy:
6593 	 *
6594 	 *   - if the connector already has an assigned crtc, use it (but make
6595 	 *     sure it's on first)
6596 	 *
6597 	 *   - try to find the first unused crtc that can drive this connector,
6598 	 *     and use that if we find one
6599 	 */
6600 
6601 	/* See if we already have a CRTC for this connector */
6602 	if (connector->state->crtc) {
6603 		crtc = to_intel_crtc(connector->state->crtc);
6604 
6605 		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
6606 		if (ret)
6607 			goto fail;
6608 
6609 		/* Make sure the crtc and connector are running */
6610 		goto found;
6611 	}
6612 
6613 	/* Find an unused one (if possible) */
6614 	for_each_intel_crtc(dev, possible_crtc) {
6615 		if (!(encoder->base.possible_crtcs &
6616 		      drm_crtc_mask(&possible_crtc->base)))
6617 			continue;
6618 
6619 		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
6620 		if (ret)
6621 			goto fail;
6622 
6623 		if (possible_crtc->base.state->enable) {
6624 			drm_modeset_unlock(&possible_crtc->base.mutex);
6625 			continue;
6626 		}
6627 
6628 		crtc = possible_crtc;
6629 		break;
6630 	}
6631 
6632 	/*
6633 	 * If we didn't find an unused CRTC, don't use any.
6634 	 */
6635 	if (!crtc) {
6636 		drm_dbg_kms(&dev_priv->drm,
6637 			    "no pipe available for load-detect\n");
6638 		ret = -ENODEV;
6639 		goto fail;
6640 	}
6641 
6642 found:
6643 	state = drm_atomic_state_alloc(dev);
6644 	restore_state = drm_atomic_state_alloc(dev);
6645 	if (!state || !restore_state) {
6646 		ret = -ENOMEM;
6647 		goto fail;
6648 	}
6649 
6650 	state->acquire_ctx = ctx;
6651 	restore_state->acquire_ctx = ctx;
6652 
6653 	connector_state = drm_atomic_get_connector_state(state, connector);
6654 	if (IS_ERR(connector_state)) {
6655 		ret = PTR_ERR(connector_state);
6656 		goto fail;
6657 	}
6658 
6659 	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
6660 	if (ret)
6661 		goto fail;
6662 
6663 	crtc_state = intel_atomic_get_crtc_state(state, crtc);
6664 	if (IS_ERR(crtc_state)) {
6665 		ret = PTR_ERR(crtc_state);
6666 		goto fail;
6667 	}
6668 
6669 	crtc_state->uapi.active = true;
6670 
6671 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
6672 					   &load_detect_mode);
6673 	if (ret)
6674 		goto fail;
6675 
6676 	ret = intel_modeset_disable_planes(state, &crtc->base);
6677 	if (ret)
6678 		goto fail;
6679 
6680 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
6681 	if (!ret)
6682 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
6683 	if (!ret)
6684 		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
6685 	if (ret) {
6686 		drm_dbg_kms(&dev_priv->drm,
6687 			    "Failed to create a copy of old state to restore: %i\n",
6688 			    ret);
6689 		goto fail;
6690 	}
6691 
6692 	ret = drm_atomic_commit(state);
6693 	if (ret) {
6694 		drm_dbg_kms(&dev_priv->drm,
6695 			    "failed to set mode on load-detect pipe\n");
6696 		goto fail;
6697 	}
6698 
6699 	old->restore_state = restore_state;
6700 	drm_atomic_state_put(state);
6701 
6702 	/* let the connector get through one full cycle before testing */
6703 	intel_wait_for_vblank(dev_priv, crtc->pipe);
6704 	return true;
6705 
6706 fail:
6707 	if (state) {
6708 		drm_atomic_state_put(state);
6709 		state = NULL;
6710 	}
6711 	if (restore_state) {
6712 		drm_atomic_state_put(restore_state);
6713 		restore_state = NULL;
6714 	}
6715 
6716 	if (ret == -EDEADLK)
6717 		return ret;
6718 
6719 	return false;
6720 }
6721 
6722 void intel_release_load_detect_pipe(struct drm_connector *connector,
6723 				    struct intel_load_detect_pipe *old,
6724 				    struct drm_modeset_acquire_ctx *ctx)
6725 {
6726 	struct intel_encoder *intel_encoder =
6727 		intel_attached_encoder(to_intel_connector(connector));
6728 	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
6729 	struct drm_encoder *encoder = &intel_encoder->base;
6730 	struct drm_atomic_state *state = old->restore_state;
6731 	int ret;
6732 
6733 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6734 		    connector->base.id, connector->name,
6735 		    encoder->base.id, encoder->name);
6736 
6737 	if (!state)
6738 		return;
6739 
6740 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
6741 	if (ret)
6742 		drm_dbg_kms(&i915->drm,
6743 			    "Couldn't release load detect pipe: %i\n", ret);
6744 	drm_atomic_state_put(state);
6745 }
6746 
6747 static int i9xx_pll_refclk(struct drm_device *dev,
6748 			   const struct intel_crtc_state *pipe_config)
6749 {
6750 	struct drm_i915_private *dev_priv = to_i915(dev);
6751 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6752 
6753 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
6754 		return dev_priv->vbt.lvds_ssc_freq;
6755 	else if (HAS_PCH_SPLIT(dev_priv))
6756 		return 120000;
6757 	else if (DISPLAY_VER(dev_priv) != 2)
6758 		return 96000;
6759 	else
6760 		return 48000;
6761 }
6762 
6763 /* Returns the clock of the currently programmed mode of the given pipe. */
6764 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
6765 				struct intel_crtc_state *pipe_config)
6766 {
6767 	struct drm_device *dev = crtc->base.dev;
6768 	struct drm_i915_private *dev_priv = to_i915(dev);
6769 	enum pipe pipe = crtc->pipe;
6770 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6771 	u32 fp;
6772 	struct dpll clock;
6773 	int port_clock;
6774 	int refclk = i9xx_pll_refclk(dev, pipe_config);
6775 
6776 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6777 		fp = pipe_config->dpll_hw_state.fp0;
6778 	else
6779 		fp = pipe_config->dpll_hw_state.fp1;
6780 
6781 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6782 	if (IS_PINEVIEW(dev_priv)) {
6783 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6784 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6785 	} else {
6786 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6787 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6788 	}
6789 
6790 	if (DISPLAY_VER(dev_priv) != 2) {
6791 		if (IS_PINEVIEW(dev_priv))
6792 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6793 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6794 		else
6795 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6796 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
6797 
6798 		switch (dpll & DPLL_MODE_MASK) {
6799 		case DPLLB_MODE_DAC_SERIAL:
6800 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6801 				5 : 10;
6802 			break;
6803 		case DPLLB_MODE_LVDS:
6804 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6805 				7 : 14;
6806 			break;
6807 		default:
6808 			drm_dbg_kms(&dev_priv->drm,
6809 				    "Unknown DPLL mode %08x in programmed "
6810 				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
6811 			return;
6812 		}
6813 
6814 		if (IS_PINEVIEW(dev_priv))
6815 			port_clock = pnv_calc_dpll_params(refclk, &clock);
6816 		else
6817 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
6818 	} else {
6819 		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
6820 								 LVDS);
6821 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
6822 
6823 		if (is_lvds) {
6824 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6825 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
6826 
6827 			if (lvds & LVDS_CLKB_POWER_UP)
6828 				clock.p2 = 7;
6829 			else
6830 				clock.p2 = 14;
6831 		} else {
6832 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
6833 				clock.p1 = 2;
6834 			else {
6835 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6836 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6837 			}
6838 			if (dpll & PLL_P2_DIVIDE_BY_4)
6839 				clock.p2 = 4;
6840 			else
6841 				clock.p2 = 2;
6842 		}
6843 
6844 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
6845 	}
6846 
6847 	/*
6848 	 * This value includes pixel_multiplier. We will use
6849 	 * port_clock to compute adjusted_mode.crtc_clock in the
6850 	 * encoder's get_config() function.
6851 	 */
6852 	pipe_config->port_clock = port_clock;
6853 }
6854 
6855 int intel_dotclock_calculate(int link_freq,
6856 			     const struct intel_link_m_n *m_n)
6857 {
6858 	/*
6859 	 * The calculation for the data clock is:
6860 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
6861 	 * But we want to avoid losing precison if possible, so:
6862 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
6863 	 *
6864 	 * and the link clock is simpler:
6865 	 * link_clock = (m * link_clock) / n
6866 	 */
6867 
6868 	if (!m_n->link_n)
6869 		return 0;
6870 
6871 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6872 }
6873 
6874 static void ilk_pch_clock_get(struct intel_crtc *crtc,
6875 			      struct intel_crtc_state *pipe_config)
6876 {
6877 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6878 
6879 	/* read out port_clock from the DPLL */
6880 	i9xx_crtc_clock_get(crtc, pipe_config);
6881 
6882 	/*
6883 	 * In case there is an active pipe without active ports,
6884 	 * we may need some idea for the dotclock anyway.
6885 	 * Calculate one based on the FDI configuration.
6886 	 */
6887 	pipe_config->hw.adjusted_mode.crtc_clock =
6888 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6889 					 &pipe_config->fdi_m_n);
6890 }
6891 
6892 /* Returns the currently programmed mode of the given encoder. */
6893 struct drm_display_mode *
6894 intel_encoder_current_mode(struct intel_encoder *encoder)
6895 {
6896 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6897 	struct intel_crtc_state *crtc_state;
6898 	struct drm_display_mode *mode;
6899 	struct intel_crtc *crtc;
6900 	enum pipe pipe;
6901 
6902 	if (!encoder->get_hw_state(encoder, &pipe))
6903 		return NULL;
6904 
6905 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6906 
6907 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6908 	if (!mode)
6909 		return NULL;
6910 
6911 	crtc_state = intel_crtc_state_alloc(crtc);
6912 	if (!crtc_state) {
6913 		kfree(mode);
6914 		return NULL;
6915 	}
6916 
6917 	if (!intel_crtc_get_pipe_config(crtc_state)) {
6918 		kfree(crtc_state);
6919 		kfree(mode);
6920 		return NULL;
6921 	}
6922 
6923 	intel_encoder_get_config(encoder, crtc_state);
6924 
6925 	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
6926 
6927 	kfree(crtc_state);
6928 
6929 	return mode;
6930 }
6931 
6932 /**
6933  * intel_wm_need_update - Check whether watermarks need updating
6934  * @cur: current plane state
6935  * @new: new plane state
6936  *
6937  * Check current plane state versus the new one to determine whether
6938  * watermarks need to be recalculated.
6939  *
6940  * Returns true or false.
6941  */
6942 static bool intel_wm_need_update(const struct intel_plane_state *cur,
6943 				 struct intel_plane_state *new)
6944 {
6945 	/* Update watermarks on tiling or size changes. */
6946 	if (new->uapi.visible != cur->uapi.visible)
6947 		return true;
6948 
6949 	if (!cur->hw.fb || !new->hw.fb)
6950 		return false;
6951 
6952 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
6953 	    cur->hw.rotation != new->hw.rotation ||
6954 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
6955 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
6956 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
6957 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
6958 		return true;
6959 
6960 	return false;
6961 }
6962 
6963 static bool needs_scaling(const struct intel_plane_state *state)
6964 {
6965 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
6966 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
6967 	int dst_w = drm_rect_width(&state->uapi.dst);
6968 	int dst_h = drm_rect_height(&state->uapi.dst);
6969 
6970 	return (src_w != dst_w || src_h != dst_h);
6971 }
6972 
6973 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
6974 				    struct intel_crtc_state *crtc_state,
6975 				    const struct intel_plane_state *old_plane_state,
6976 				    struct intel_plane_state *plane_state)
6977 {
6978 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6979 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
6980 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6981 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
6982 	bool was_crtc_enabled = old_crtc_state->hw.active;
6983 	bool is_crtc_enabled = crtc_state->hw.active;
6984 	bool turn_off, turn_on, visible, was_visible;
6985 	int ret;
6986 
6987 	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
6988 		ret = skl_update_scaler_plane(crtc_state, plane_state);
6989 		if (ret)
6990 			return ret;
6991 	}
6992 
6993 	was_visible = old_plane_state->uapi.visible;
6994 	visible = plane_state->uapi.visible;
6995 
6996 	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
6997 		was_visible = false;
6998 
6999 	/*
7000 	 * Visibility is calculated as if the crtc was on, but
7001 	 * after scaler setup everything depends on it being off
7002 	 * when the crtc isn't active.
7003 	 *
7004 	 * FIXME this is wrong for watermarks. Watermarks should also
7005 	 * be computed as if the pipe would be active. Perhaps move
7006 	 * per-plane wm computation to the .check_plane() hook, and
7007 	 * only combine the results from all planes in the current place?
7008 	 */
7009 	if (!is_crtc_enabled) {
7010 		intel_plane_set_invisible(crtc_state, plane_state);
7011 		visible = false;
7012 	}
7013 
7014 	if (!was_visible && !visible)
7015 		return 0;
7016 
7017 	turn_off = was_visible && (!visible || mode_changed);
7018 	turn_on = visible && (!was_visible || mode_changed);
7019 
7020 	drm_dbg_atomic(&dev_priv->drm,
7021 		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
7022 		       crtc->base.base.id, crtc->base.name,
7023 		       plane->base.base.id, plane->base.name,
7024 		       was_visible, visible,
7025 		       turn_off, turn_on, mode_changed);
7026 
7027 	if (turn_on) {
7028 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
7029 			crtc_state->update_wm_pre = true;
7030 
7031 		/* must disable cxsr around plane enable/disable */
7032 		if (plane->id != PLANE_CURSOR)
7033 			crtc_state->disable_cxsr = true;
7034 	} else if (turn_off) {
7035 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
7036 			crtc_state->update_wm_post = true;
7037 
7038 		/* must disable cxsr around plane enable/disable */
7039 		if (plane->id != PLANE_CURSOR)
7040 			crtc_state->disable_cxsr = true;
7041 	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
7042 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
7043 			/* FIXME bollocks */
7044 			crtc_state->update_wm_pre = true;
7045 			crtc_state->update_wm_post = true;
7046 		}
7047 	}
7048 
7049 	if (visible || was_visible)
7050 		crtc_state->fb_bits |= plane->frontbuffer_bit;
7051 
7052 	/*
7053 	 * ILK/SNB DVSACNTR/Sprite Enable
7054 	 * IVB SPR_CTL/Sprite Enable
7055 	 * "When in Self Refresh Big FIFO mode, a write to enable the
7056 	 *  plane will be internally buffered and delayed while Big FIFO
7057 	 *  mode is exiting."
7058 	 *
7059 	 * Which means that enabling the sprite can take an extra frame
7060 	 * when we start in big FIFO mode (LP1+). Thus we need to drop
7061 	 * down to LP0 and wait for vblank in order to make sure the
7062 	 * sprite gets enabled on the next vblank after the register write.
7063 	 * Doing otherwise would risk enabling the sprite one frame after
7064 	 * we've already signalled flip completion. We can resume LP1+
7065 	 * once the sprite has been enabled.
7066 	 *
7067 	 *
7068 	 * WaCxSRDisabledForSpriteScaling:ivb
7069 	 * IVB SPR_SCALE/Scaling Enable
7070 	 * "Low Power watermarks must be disabled for at least one
7071 	 *  frame before enabling sprite scaling, and kept disabled
7072 	 *  until sprite scaling is disabled."
7073 	 *
7074 	 * ILK/SNB DVSASCALE/Scaling Enable
7075 	 * "When in Self Refresh Big FIFO mode, scaling enable will be
7076 	 *  masked off while Big FIFO mode is exiting."
7077 	 *
7078 	 * Despite the w/a only being listed for IVB we assume that
7079 	 * the ILK/SNB note has similar ramifications, hence we apply
7080 	 * the w/a on all three platforms.
7081 	 *
7082 	 * With experimental results seems this is needed also for primary
7083 	 * plane, not only sprite plane.
7084 	 */
7085 	if (plane->id != PLANE_CURSOR &&
7086 	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
7087 	     IS_IVYBRIDGE(dev_priv)) &&
7088 	    (turn_on || (!needs_scaling(old_plane_state) &&
7089 			 needs_scaling(plane_state))))
7090 		crtc_state->disable_lp_wm = true;
7091 
7092 	return 0;
7093 }
7094 
7095 static bool encoders_cloneable(const struct intel_encoder *a,
7096 			       const struct intel_encoder *b)
7097 {
7098 	/* masks could be asymmetric, so check both ways */
7099 	return a == b || (a->cloneable & (1 << b->type) &&
7100 			  b->cloneable & (1 << a->type));
7101 }
7102 
7103 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
7104 					 struct intel_crtc *crtc,
7105 					 struct intel_encoder *encoder)
7106 {
7107 	struct intel_encoder *source_encoder;
7108 	struct drm_connector *connector;
7109 	struct drm_connector_state *connector_state;
7110 	int i;
7111 
7112 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7113 		if (connector_state->crtc != &crtc->base)
7114 			continue;
7115 
7116 		source_encoder =
7117 			to_intel_encoder(connector_state->best_encoder);
7118 		if (!encoders_cloneable(encoder, source_encoder))
7119 			return false;
7120 	}
7121 
7122 	return true;
7123 }
7124 
7125 static int icl_add_linked_planes(struct intel_atomic_state *state)
7126 {
7127 	struct intel_plane *plane, *linked;
7128 	struct intel_plane_state *plane_state, *linked_plane_state;
7129 	int i;
7130 
7131 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7132 		linked = plane_state->planar_linked_plane;
7133 
7134 		if (!linked)
7135 			continue;
7136 
7137 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
7138 		if (IS_ERR(linked_plane_state))
7139 			return PTR_ERR(linked_plane_state);
7140 
7141 		drm_WARN_ON(state->base.dev,
7142 			    linked_plane_state->planar_linked_plane != plane);
7143 		drm_WARN_ON(state->base.dev,
7144 			    linked_plane_state->planar_slave == plane_state->planar_slave);
7145 	}
7146 
7147 	return 0;
7148 }
7149 
7150 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
7151 {
7152 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7153 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7154 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
7155 	struct intel_plane *plane, *linked;
7156 	struct intel_plane_state *plane_state;
7157 	int i;
7158 
7159 	if (DISPLAY_VER(dev_priv) < 11)
7160 		return 0;
7161 
7162 	/*
7163 	 * Destroy all old plane links and make the slave plane invisible
7164 	 * in the crtc_state->active_planes mask.
7165 	 */
7166 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7167 		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
7168 			continue;
7169 
7170 		plane_state->planar_linked_plane = NULL;
7171 		if (plane_state->planar_slave && !plane_state->uapi.visible) {
7172 			crtc_state->enabled_planes &= ~BIT(plane->id);
7173 			crtc_state->active_planes &= ~BIT(plane->id);
7174 			crtc_state->update_planes |= BIT(plane->id);
7175 		}
7176 
7177 		plane_state->planar_slave = false;
7178 	}
7179 
7180 	if (!crtc_state->nv12_planes)
7181 		return 0;
7182 
7183 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7184 		struct intel_plane_state *linked_state = NULL;
7185 
7186 		if (plane->pipe != crtc->pipe ||
7187 		    !(crtc_state->nv12_planes & BIT(plane->id)))
7188 			continue;
7189 
7190 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
7191 			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
7192 				continue;
7193 
7194 			if (crtc_state->active_planes & BIT(linked->id))
7195 				continue;
7196 
7197 			linked_state = intel_atomic_get_plane_state(state, linked);
7198 			if (IS_ERR(linked_state))
7199 				return PTR_ERR(linked_state);
7200 
7201 			break;
7202 		}
7203 
7204 		if (!linked_state) {
7205 			drm_dbg_kms(&dev_priv->drm,
7206 				    "Need %d free Y planes for planar YUV\n",
7207 				    hweight8(crtc_state->nv12_planes));
7208 
7209 			return -EINVAL;
7210 		}
7211 
7212 		plane_state->planar_linked_plane = linked;
7213 
7214 		linked_state->planar_slave = true;
7215 		linked_state->planar_linked_plane = plane;
7216 		crtc_state->enabled_planes |= BIT(linked->id);
7217 		crtc_state->active_planes |= BIT(linked->id);
7218 		crtc_state->update_planes |= BIT(linked->id);
7219 		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
7220 			    linked->base.name, plane->base.name);
7221 
7222 		/* Copy parameters to slave plane */
7223 		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
7224 		linked_state->color_ctl = plane_state->color_ctl;
7225 		linked_state->view = plane_state->view;
7226 
7227 		intel_plane_copy_hw_state(linked_state, plane_state);
7228 		linked_state->uapi.src = plane_state->uapi.src;
7229 		linked_state->uapi.dst = plane_state->uapi.dst;
7230 
7231 		if (icl_is_hdr_plane(dev_priv, plane->id)) {
7232 			if (linked->id == PLANE_SPRITE5)
7233 				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
7234 			else if (linked->id == PLANE_SPRITE4)
7235 				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
7236 			else if (linked->id == PLANE_SPRITE3)
7237 				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
7238 			else if (linked->id == PLANE_SPRITE2)
7239 				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
7240 			else
7241 				MISSING_CASE(linked->id);
7242 		}
7243 	}
7244 
7245 	return 0;
7246 }
7247 
7248 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
7249 {
7250 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7251 	struct intel_atomic_state *state =
7252 		to_intel_atomic_state(new_crtc_state->uapi.state);
7253 	const struct intel_crtc_state *old_crtc_state =
7254 		intel_atomic_get_old_crtc_state(state, crtc);
7255 
7256 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
7257 }
7258 
7259 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
7260 {
7261 	const struct drm_display_mode *pipe_mode =
7262 		&crtc_state->hw.pipe_mode;
7263 	int linetime_wm;
7264 
7265 	if (!crtc_state->hw.enable)
7266 		return 0;
7267 
7268 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7269 					pipe_mode->crtc_clock);
7270 
7271 	return min(linetime_wm, 0x1ff);
7272 }
7273 
7274 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
7275 			       const struct intel_cdclk_state *cdclk_state)
7276 {
7277 	const struct drm_display_mode *pipe_mode =
7278 		&crtc_state->hw.pipe_mode;
7279 	int linetime_wm;
7280 
7281 	if (!crtc_state->hw.enable)
7282 		return 0;
7283 
7284 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7285 					cdclk_state->logical.cdclk);
7286 
7287 	return min(linetime_wm, 0x1ff);
7288 }
7289 
7290 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
7291 {
7292 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7293 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7294 	const struct drm_display_mode *pipe_mode =
7295 		&crtc_state->hw.pipe_mode;
7296 	int linetime_wm;
7297 
7298 	if (!crtc_state->hw.enable)
7299 		return 0;
7300 
7301 	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
7302 				   crtc_state->pixel_rate);
7303 
7304 	/* Display WA #1135: BXT:ALL GLK:ALL */
7305 	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
7306 	    dev_priv->ipc_enabled)
7307 		linetime_wm /= 2;
7308 
7309 	return min(linetime_wm, 0x1ff);
7310 }
7311 
7312 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
7313 				   struct intel_crtc *crtc)
7314 {
7315 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7316 	struct intel_crtc_state *crtc_state =
7317 		intel_atomic_get_new_crtc_state(state, crtc);
7318 	const struct intel_cdclk_state *cdclk_state;
7319 
7320 	if (DISPLAY_VER(dev_priv) >= 9)
7321 		crtc_state->linetime = skl_linetime_wm(crtc_state);
7322 	else
7323 		crtc_state->linetime = hsw_linetime_wm(crtc_state);
7324 
7325 	if (!hsw_crtc_supports_ips(crtc))
7326 		return 0;
7327 
7328 	cdclk_state = intel_atomic_get_cdclk_state(state);
7329 	if (IS_ERR(cdclk_state))
7330 		return PTR_ERR(cdclk_state);
7331 
7332 	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
7333 						       cdclk_state);
7334 
7335 	return 0;
7336 }
7337 
7338 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
7339 				   struct intel_crtc *crtc)
7340 {
7341 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7342 	struct intel_crtc_state *crtc_state =
7343 		intel_atomic_get_new_crtc_state(state, crtc);
7344 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
7345 	int ret;
7346 
7347 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
7348 	    mode_changed && !crtc_state->hw.active)
7349 		crtc_state->update_wm_post = true;
7350 
7351 	if (mode_changed && crtc_state->hw.enable &&
7352 	    dev_priv->display.crtc_compute_clock &&
7353 	    !crtc_state->bigjoiner_slave &&
7354 	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
7355 		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
7356 		if (ret)
7357 			return ret;
7358 	}
7359 
7360 	/*
7361 	 * May need to update pipe gamma enable bits
7362 	 * when C8 planes are getting enabled/disabled.
7363 	 */
7364 	if (c8_planes_changed(crtc_state))
7365 		crtc_state->uapi.color_mgmt_changed = true;
7366 
7367 	if (mode_changed || crtc_state->update_pipe ||
7368 	    crtc_state->uapi.color_mgmt_changed) {
7369 		ret = intel_color_check(crtc_state);
7370 		if (ret)
7371 			return ret;
7372 	}
7373 
7374 	if (dev_priv->display.compute_pipe_wm) {
7375 		ret = dev_priv->display.compute_pipe_wm(state, crtc);
7376 		if (ret) {
7377 			drm_dbg_kms(&dev_priv->drm,
7378 				    "Target pipe watermarks are invalid\n");
7379 			return ret;
7380 		}
7381 
7382 	}
7383 
7384 	if (dev_priv->display.compute_intermediate_wm) {
7385 		if (drm_WARN_ON(&dev_priv->drm,
7386 				!dev_priv->display.compute_pipe_wm))
7387 			return 0;
7388 
7389 		/*
7390 		 * Calculate 'intermediate' watermarks that satisfy both the
7391 		 * old state and the new state.  We can program these
7392 		 * immediately.
7393 		 */
7394 		ret = dev_priv->display.compute_intermediate_wm(state, crtc);
7395 		if (ret) {
7396 			drm_dbg_kms(&dev_priv->drm,
7397 				    "No valid intermediate pipe watermarks are possible\n");
7398 			return ret;
7399 		}
7400 	}
7401 
7402 	if (DISPLAY_VER(dev_priv) >= 9) {
7403 		if (mode_changed || crtc_state->update_pipe) {
7404 			ret = skl_update_scaler_crtc(crtc_state);
7405 			if (ret)
7406 				return ret;
7407 		}
7408 
7409 		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
7410 		if (ret)
7411 			return ret;
7412 	}
7413 
7414 	if (HAS_IPS(dev_priv)) {
7415 		ret = hsw_compute_ips_config(crtc_state);
7416 		if (ret)
7417 			return ret;
7418 	}
7419 
7420 	if (DISPLAY_VER(dev_priv) >= 9 ||
7421 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
7422 		ret = hsw_compute_linetime_wm(state, crtc);
7423 		if (ret)
7424 			return ret;
7425 
7426 	}
7427 
7428 	if (!mode_changed) {
7429 		ret = intel_psr2_sel_fetch_update(state, crtc);
7430 		if (ret)
7431 			return ret;
7432 	}
7433 
7434 	return 0;
7435 }
7436 
7437 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
7438 {
7439 	struct intel_connector *connector;
7440 	struct drm_connector_list_iter conn_iter;
7441 
7442 	drm_connector_list_iter_begin(dev, &conn_iter);
7443 	for_each_intel_connector_iter(connector, &conn_iter) {
7444 		struct drm_connector_state *conn_state = connector->base.state;
7445 		struct intel_encoder *encoder =
7446 			to_intel_encoder(connector->base.encoder);
7447 
7448 		if (conn_state->crtc)
7449 			drm_connector_put(&connector->base);
7450 
7451 		if (encoder) {
7452 			struct intel_crtc *crtc =
7453 				to_intel_crtc(encoder->base.crtc);
7454 			const struct intel_crtc_state *crtc_state =
7455 				to_intel_crtc_state(crtc->base.state);
7456 
7457 			conn_state->best_encoder = &encoder->base;
7458 			conn_state->crtc = &crtc->base;
7459 			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
7460 
7461 			drm_connector_get(&connector->base);
7462 		} else {
7463 			conn_state->best_encoder = NULL;
7464 			conn_state->crtc = NULL;
7465 		}
7466 	}
7467 	drm_connector_list_iter_end(&conn_iter);
7468 }
7469 
7470 static int
7471 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
7472 		      struct intel_crtc_state *pipe_config)
7473 {
7474 	struct drm_connector *connector = conn_state->connector;
7475 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7476 	const struct drm_display_info *info = &connector->display_info;
7477 	int bpp;
7478 
7479 	switch (conn_state->max_bpc) {
7480 	case 6 ... 7:
7481 		bpp = 6 * 3;
7482 		break;
7483 	case 8 ... 9:
7484 		bpp = 8 * 3;
7485 		break;
7486 	case 10 ... 11:
7487 		bpp = 10 * 3;
7488 		break;
7489 	case 12 ... 16:
7490 		bpp = 12 * 3;
7491 		break;
7492 	default:
7493 		MISSING_CASE(conn_state->max_bpc);
7494 		return -EINVAL;
7495 	}
7496 
7497 	if (bpp < pipe_config->pipe_bpp) {
7498 		drm_dbg_kms(&i915->drm,
7499 			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
7500 			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
7501 			    connector->base.id, connector->name,
7502 			    bpp, 3 * info->bpc,
7503 			    3 * conn_state->max_requested_bpc,
7504 			    pipe_config->pipe_bpp);
7505 
7506 		pipe_config->pipe_bpp = bpp;
7507 	}
7508 
7509 	return 0;
7510 }
7511 
7512 static int
7513 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7514 			  struct intel_crtc_state *pipe_config)
7515 {
7516 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7517 	struct drm_atomic_state *state = pipe_config->uapi.state;
7518 	struct drm_connector *connector;
7519 	struct drm_connector_state *connector_state;
7520 	int bpp, i;
7521 
7522 	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7523 	    IS_CHERRYVIEW(dev_priv)))
7524 		bpp = 10*3;
7525 	else if (DISPLAY_VER(dev_priv) >= 5)
7526 		bpp = 12*3;
7527 	else
7528 		bpp = 8*3;
7529 
7530 	pipe_config->pipe_bpp = bpp;
7531 
7532 	/* Clamp display bpp to connector max bpp */
7533 	for_each_new_connector_in_state(state, connector, connector_state, i) {
7534 		int ret;
7535 
7536 		if (connector_state->crtc != &crtc->base)
7537 			continue;
7538 
7539 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7540 		if (ret)
7541 			return ret;
7542 	}
7543 
7544 	return 0;
7545 }
7546 
7547 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
7548 				    const struct drm_display_mode *mode)
7549 {
7550 	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
7551 		    "type: 0x%x flags: 0x%x\n",
7552 		    mode->crtc_clock,
7553 		    mode->crtc_hdisplay, mode->crtc_hsync_start,
7554 		    mode->crtc_hsync_end, mode->crtc_htotal,
7555 		    mode->crtc_vdisplay, mode->crtc_vsync_start,
7556 		    mode->crtc_vsync_end, mode->crtc_vtotal,
7557 		    mode->type, mode->flags);
7558 }
7559 
7560 static void
7561 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
7562 		      const char *id, unsigned int lane_count,
7563 		      const struct intel_link_m_n *m_n)
7564 {
7565 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7566 
7567 	drm_dbg_kms(&i915->drm,
7568 		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
7569 		    id, lane_count,
7570 		    m_n->gmch_m, m_n->gmch_n,
7571 		    m_n->link_m, m_n->link_n, m_n->tu);
7572 }
7573 
7574 static void
7575 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7576 		     const union hdmi_infoframe *frame)
7577 {
7578 	if (!drm_debug_enabled(DRM_UT_KMS))
7579 		return;
7580 
7581 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7582 }
7583 
7584 static void
7585 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7586 		      const struct drm_dp_vsc_sdp *vsc)
7587 {
7588 	if (!drm_debug_enabled(DRM_UT_KMS))
7589 		return;
7590 
7591 	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7592 }
7593 
7594 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
7595 
7596 static const char * const output_type_str[] = {
7597 	OUTPUT_TYPE(UNUSED),
7598 	OUTPUT_TYPE(ANALOG),
7599 	OUTPUT_TYPE(DVO),
7600 	OUTPUT_TYPE(SDVO),
7601 	OUTPUT_TYPE(LVDS),
7602 	OUTPUT_TYPE(TVOUT),
7603 	OUTPUT_TYPE(HDMI),
7604 	OUTPUT_TYPE(DP),
7605 	OUTPUT_TYPE(EDP),
7606 	OUTPUT_TYPE(DSI),
7607 	OUTPUT_TYPE(DDI),
7608 	OUTPUT_TYPE(DP_MST),
7609 };
7610 
7611 #undef OUTPUT_TYPE
7612 
7613 static void snprintf_output_types(char *buf, size_t len,
7614 				  unsigned int output_types)
7615 {
7616 	char *str = buf;
7617 	int i;
7618 
7619 	str[0] = '\0';
7620 
7621 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
7622 		int r;
7623 
7624 		if ((output_types & BIT(i)) == 0)
7625 			continue;
7626 
7627 		r = snprintf(str, len, "%s%s",
7628 			     str != buf ? "," : "", output_type_str[i]);
7629 		if (r >= len)
7630 			break;
7631 		str += r;
7632 		len -= r;
7633 
7634 		output_types &= ~BIT(i);
7635 	}
7636 
7637 	WARN_ON_ONCE(output_types != 0);
7638 }
7639 
7640 static const char * const output_format_str[] = {
7641 	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
7642 	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
7643 	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
7644 };
7645 
7646 static const char *output_formats(enum intel_output_format format)
7647 {
7648 	if (format >= ARRAY_SIZE(output_format_str))
7649 		return "invalid";
7650 	return output_format_str[format];
7651 }
7652 
7653 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
7654 {
7655 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
7656 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
7657 	const struct drm_framebuffer *fb = plane_state->hw.fb;
7658 
7659 	if (!fb) {
7660 		drm_dbg_kms(&i915->drm,
7661 			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
7662 			    plane->base.base.id, plane->base.name,
7663 			    yesno(plane_state->uapi.visible));
7664 		return;
7665 	}
7666 
7667 	drm_dbg_kms(&i915->drm,
7668 		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
7669 		    plane->base.base.id, plane->base.name,
7670 		    fb->base.id, fb->width, fb->height, &fb->format->format,
7671 		    fb->modifier, yesno(plane_state->uapi.visible));
7672 	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
7673 		    plane_state->hw.rotation, plane_state->scaler_id);
7674 	if (plane_state->uapi.visible)
7675 		drm_dbg_kms(&i915->drm,
7676 			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
7677 			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
7678 			    DRM_RECT_ARG(&plane_state->uapi.dst));
7679 }
7680 
7681 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
7682 				   struct intel_atomic_state *state,
7683 				   const char *context)
7684 {
7685 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7686 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7687 	const struct intel_plane_state *plane_state;
7688 	struct intel_plane *plane;
7689 	char buf[64];
7690 	int i;
7691 
7692 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
7693 		    crtc->base.base.id, crtc->base.name,
7694 		    yesno(pipe_config->hw.enable), context);
7695 
7696 	if (!pipe_config->hw.enable)
7697 		goto dump_planes;
7698 
7699 	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
7700 	drm_dbg_kms(&dev_priv->drm,
7701 		    "active: %s, output_types: %s (0x%x), output format: %s\n",
7702 		    yesno(pipe_config->hw.active),
7703 		    buf, pipe_config->output_types,
7704 		    output_formats(pipe_config->output_format));
7705 
7706 	drm_dbg_kms(&dev_priv->drm,
7707 		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
7708 		    transcoder_name(pipe_config->cpu_transcoder),
7709 		    pipe_config->pipe_bpp, pipe_config->dither);
7710 
7711 	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
7712 		    transcoder_name(pipe_config->mst_master_transcoder));
7713 
7714 	drm_dbg_kms(&dev_priv->drm,
7715 		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
7716 		    transcoder_name(pipe_config->master_transcoder),
7717 		    pipe_config->sync_mode_slaves_mask);
7718 
7719 	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
7720 		    pipe_config->bigjoiner_slave ? "slave" :
7721 		    pipe_config->bigjoiner ? "master" : "no");
7722 
7723 	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
7724 		    enableddisabled(pipe_config->splitter.enable),
7725 		    pipe_config->splitter.link_count,
7726 		    pipe_config->splitter.pixel_overlap);
7727 
7728 	if (pipe_config->has_pch_encoder)
7729 		intel_dump_m_n_config(pipe_config, "fdi",
7730 				      pipe_config->fdi_lanes,
7731 				      &pipe_config->fdi_m_n);
7732 
7733 	if (intel_crtc_has_dp_encoder(pipe_config)) {
7734 		intel_dump_m_n_config(pipe_config, "dp m_n",
7735 				pipe_config->lane_count, &pipe_config->dp_m_n);
7736 		if (pipe_config->has_drrs)
7737 			intel_dump_m_n_config(pipe_config, "dp m2_n2",
7738 					      pipe_config->lane_count,
7739 					      &pipe_config->dp_m2_n2);
7740 	}
7741 
7742 	drm_dbg_kms(&dev_priv->drm,
7743 		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
7744 		    pipe_config->has_audio, pipe_config->has_infoframe,
7745 		    pipe_config->infoframes.enable);
7746 
7747 	if (pipe_config->infoframes.enable &
7748 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
7749 		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
7750 			    pipe_config->infoframes.gcp);
7751 	if (pipe_config->infoframes.enable &
7752 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
7753 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
7754 	if (pipe_config->infoframes.enable &
7755 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
7756 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
7757 	if (pipe_config->infoframes.enable &
7758 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
7759 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
7760 	if (pipe_config->infoframes.enable &
7761 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
7762 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7763 	if (pipe_config->infoframes.enable &
7764 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
7765 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7766 	if (pipe_config->infoframes.enable &
7767 	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
7768 		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
7769 
7770 	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
7771 		    yesno(pipe_config->vrr.enable),
7772 		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
7773 		    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
7774 		    pipe_config->vrr.flipline,
7775 		    intel_vrr_vmin_vblank_start(pipe_config),
7776 		    intel_vrr_vmax_vblank_start(pipe_config));
7777 
7778 	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
7779 	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
7780 	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
7781 	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
7782 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
7783 	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
7784 	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
7785 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
7786 	drm_dbg_kms(&dev_priv->drm,
7787 		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
7788 		    pipe_config->port_clock,
7789 		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
7790 		    pipe_config->pixel_rate);
7791 
7792 	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
7793 		    pipe_config->linetime, pipe_config->ips_linetime);
7794 
7795 	if (DISPLAY_VER(dev_priv) >= 9)
7796 		drm_dbg_kms(&dev_priv->drm,
7797 			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
7798 			    crtc->num_scalers,
7799 			    pipe_config->scaler_state.scaler_users,
7800 			    pipe_config->scaler_state.scaler_id);
7801 
7802 	if (HAS_GMCH(dev_priv))
7803 		drm_dbg_kms(&dev_priv->drm,
7804 			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
7805 			    pipe_config->gmch_pfit.control,
7806 			    pipe_config->gmch_pfit.pgm_ratios,
7807 			    pipe_config->gmch_pfit.lvds_border_bits);
7808 	else
7809 		drm_dbg_kms(&dev_priv->drm,
7810 			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
7811 			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
7812 			    enableddisabled(pipe_config->pch_pfit.enabled),
7813 			    yesno(pipe_config->pch_pfit.force_thru));
7814 
7815 	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
7816 		    pipe_config->ips_enabled, pipe_config->double_wide);
7817 
7818 	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
7819 
7820 	if (IS_CHERRYVIEW(dev_priv))
7821 		drm_dbg_kms(&dev_priv->drm,
7822 			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7823 			    pipe_config->cgm_mode, pipe_config->gamma_mode,
7824 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7825 	else
7826 		drm_dbg_kms(&dev_priv->drm,
7827 			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7828 			    pipe_config->csc_mode, pipe_config->gamma_mode,
7829 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7830 
7831 	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
7832 		    pipe_config->hw.degamma_lut ?
7833 		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
7834 		    pipe_config->hw.gamma_lut ?
7835 		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
7836 
7837 dump_planes:
7838 	if (!state)
7839 		return;
7840 
7841 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7842 		if (plane->pipe == crtc->pipe)
7843 			intel_dump_plane_state(plane_state);
7844 	}
7845 }
7846 
7847 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
7848 {
7849 	struct drm_device *dev = state->base.dev;
7850 	struct drm_connector *connector;
7851 	struct drm_connector_list_iter conn_iter;
7852 	unsigned int used_ports = 0;
7853 	unsigned int used_mst_ports = 0;
7854 	bool ret = true;
7855 
7856 	/*
7857 	 * We're going to peek into connector->state,
7858 	 * hence connection_mutex must be held.
7859 	 */
7860 	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
7861 
7862 	/*
7863 	 * Walk the connector list instead of the encoder
7864 	 * list to detect the problem on ddi platforms
7865 	 * where there's just one encoder per digital port.
7866 	 */
7867 	drm_connector_list_iter_begin(dev, &conn_iter);
7868 	drm_for_each_connector_iter(connector, &conn_iter) {
7869 		struct drm_connector_state *connector_state;
7870 		struct intel_encoder *encoder;
7871 
7872 		connector_state =
7873 			drm_atomic_get_new_connector_state(&state->base,
7874 							   connector);
7875 		if (!connector_state)
7876 			connector_state = connector->state;
7877 
7878 		if (!connector_state->best_encoder)
7879 			continue;
7880 
7881 		encoder = to_intel_encoder(connector_state->best_encoder);
7882 
7883 		drm_WARN_ON(dev, !connector_state->crtc);
7884 
7885 		switch (encoder->type) {
7886 		case INTEL_OUTPUT_DDI:
7887 			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
7888 				break;
7889 			fallthrough;
7890 		case INTEL_OUTPUT_DP:
7891 		case INTEL_OUTPUT_HDMI:
7892 		case INTEL_OUTPUT_EDP:
7893 			/* the same port mustn't appear more than once */
7894 			if (used_ports & BIT(encoder->port))
7895 				ret = false;
7896 
7897 			used_ports |= BIT(encoder->port);
7898 			break;
7899 		case INTEL_OUTPUT_DP_MST:
7900 			used_mst_ports |=
7901 				1 << encoder->port;
7902 			break;
7903 		default:
7904 			break;
7905 		}
7906 	}
7907 	drm_connector_list_iter_end(&conn_iter);
7908 
7909 	/* can't mix MST and SST/HDMI on the same port */
7910 	if (used_ports & used_mst_ports)
7911 		return false;
7912 
7913 	return ret;
7914 }
7915 
7916 static void
7917 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
7918 					   struct intel_crtc_state *crtc_state)
7919 {
7920 	const struct intel_crtc_state *from_crtc_state = crtc_state;
7921 
7922 	if (crtc_state->bigjoiner_slave) {
7923 		from_crtc_state = intel_atomic_get_new_crtc_state(state,
7924 								  crtc_state->bigjoiner_linked_crtc);
7925 
7926 		/* No need to copy state if the master state is unchanged */
7927 		if (!from_crtc_state)
7928 			return;
7929 	}
7930 
7931 	intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
7932 }
7933 
7934 static void
7935 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
7936 				 struct intel_crtc_state *crtc_state)
7937 {
7938 	crtc_state->hw.enable = crtc_state->uapi.enable;
7939 	crtc_state->hw.active = crtc_state->uapi.active;
7940 	crtc_state->hw.mode = crtc_state->uapi.mode;
7941 	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
7942 	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
7943 
7944 	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
7945 }
7946 
7947 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
7948 {
7949 	if (crtc_state->bigjoiner_slave)
7950 		return;
7951 
7952 	crtc_state->uapi.enable = crtc_state->hw.enable;
7953 	crtc_state->uapi.active = crtc_state->hw.active;
7954 	drm_WARN_ON(crtc_state->uapi.crtc->dev,
7955 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
7956 
7957 	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
7958 	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
7959 
7960 	/* copy color blobs to uapi */
7961 	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
7962 				  crtc_state->hw.degamma_lut);
7963 	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
7964 				  crtc_state->hw.gamma_lut);
7965 	drm_property_replace_blob(&crtc_state->uapi.ctm,
7966 				  crtc_state->hw.ctm);
7967 }
7968 
7969 static int
7970 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
7971 			  const struct intel_crtc_state *from_crtc_state)
7972 {
7973 	struct intel_crtc_state *saved_state;
7974 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7975 
7976 	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
7977 	if (!saved_state)
7978 		return -ENOMEM;
7979 
7980 	saved_state->uapi = crtc_state->uapi;
7981 	saved_state->scaler_state = crtc_state->scaler_state;
7982 	saved_state->shared_dpll = crtc_state->shared_dpll;
7983 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7984 	saved_state->crc_enabled = crtc_state->crc_enabled;
7985 
7986 	intel_crtc_free_hw_state(crtc_state);
7987 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
7988 	kfree(saved_state);
7989 
7990 	/* Re-init hw state */
7991 	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
7992 	crtc_state->hw.enable = from_crtc_state->hw.enable;
7993 	crtc_state->hw.active = from_crtc_state->hw.active;
7994 	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
7995 	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
7996 
7997 	/* Some fixups */
7998 	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
7999 	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
8000 	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
8001 	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
8002 	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
8003 	crtc_state->bigjoiner_slave = true;
8004 	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
8005 	crtc_state->has_audio = false;
8006 
8007 	return 0;
8008 }
8009 
8010 static int
8011 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
8012 				 struct intel_crtc_state *crtc_state)
8013 {
8014 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8015 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8016 	struct intel_crtc_state *saved_state;
8017 
8018 	saved_state = intel_crtc_state_alloc(crtc);
8019 	if (!saved_state)
8020 		return -ENOMEM;
8021 
8022 	/* free the old crtc_state->hw members */
8023 	intel_crtc_free_hw_state(crtc_state);
8024 
8025 	/* FIXME: before the switch to atomic started, a new pipe_config was
8026 	 * kzalloc'd. Code that depends on any field being zero should be
8027 	 * fixed, so that the crtc_state can be safely duplicated. For now,
8028 	 * only fields that are know to not cause problems are preserved. */
8029 
8030 	saved_state->uapi = crtc_state->uapi;
8031 	saved_state->scaler_state = crtc_state->scaler_state;
8032 	saved_state->shared_dpll = crtc_state->shared_dpll;
8033 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
8034 	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
8035 	       sizeof(saved_state->icl_port_dplls));
8036 	saved_state->crc_enabled = crtc_state->crc_enabled;
8037 	if (IS_G4X(dev_priv) ||
8038 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8039 		saved_state->wm = crtc_state->wm;
8040 
8041 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
8042 	kfree(saved_state);
8043 
8044 	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
8045 
8046 	return 0;
8047 }
8048 
8049 static int
8050 intel_modeset_pipe_config(struct intel_atomic_state *state,
8051 			  struct intel_crtc_state *pipe_config)
8052 {
8053 	struct drm_crtc *crtc = pipe_config->uapi.crtc;
8054 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
8055 	struct drm_connector *connector;
8056 	struct drm_connector_state *connector_state;
8057 	int base_bpp, ret, i;
8058 	bool retry = true;
8059 
8060 	pipe_config->cpu_transcoder =
8061 		(enum transcoder) to_intel_crtc(crtc)->pipe;
8062 
8063 	/*
8064 	 * Sanitize sync polarity flags based on requested ones. If neither
8065 	 * positive or negative polarity is requested, treat this as meaning
8066 	 * negative polarity.
8067 	 */
8068 	if (!(pipe_config->hw.adjusted_mode.flags &
8069 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
8070 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
8071 
8072 	if (!(pipe_config->hw.adjusted_mode.flags &
8073 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
8074 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
8075 
8076 	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
8077 					pipe_config);
8078 	if (ret)
8079 		return ret;
8080 
8081 	base_bpp = pipe_config->pipe_bpp;
8082 
8083 	/*
8084 	 * Determine the real pipe dimensions. Note that stereo modes can
8085 	 * increase the actual pipe size due to the frame doubling and
8086 	 * insertion of additional space for blanks between the frame. This
8087 	 * is stored in the crtc timings. We use the requested mode to do this
8088 	 * computation to clearly distinguish it from the adjusted mode, which
8089 	 * can be changed by the connectors in the below retry loop.
8090 	 */
8091 	drm_mode_get_hv_timing(&pipe_config->hw.mode,
8092 			       &pipe_config->pipe_src_w,
8093 			       &pipe_config->pipe_src_h);
8094 
8095 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
8096 		struct intel_encoder *encoder =
8097 			to_intel_encoder(connector_state->best_encoder);
8098 
8099 		if (connector_state->crtc != crtc)
8100 			continue;
8101 
8102 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
8103 			drm_dbg_kms(&i915->drm,
8104 				    "rejecting invalid cloning configuration\n");
8105 			return -EINVAL;
8106 		}
8107 
8108 		/*
8109 		 * Determine output_types before calling the .compute_config()
8110 		 * hooks so that the hooks can use this information safely.
8111 		 */
8112 		if (encoder->compute_output_type)
8113 			pipe_config->output_types |=
8114 				BIT(encoder->compute_output_type(encoder, pipe_config,
8115 								 connector_state));
8116 		else
8117 			pipe_config->output_types |= BIT(encoder->type);
8118 	}
8119 
8120 encoder_retry:
8121 	/* Ensure the port clock defaults are reset when retrying. */
8122 	pipe_config->port_clock = 0;
8123 	pipe_config->pixel_multiplier = 1;
8124 
8125 	/* Fill in default crtc timings, allow encoders to overwrite them. */
8126 	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
8127 			      CRTC_STEREO_DOUBLE);
8128 
8129 	/* Pass our mode to the connectors and the CRTC to give them a chance to
8130 	 * adjust it according to limitations or connector properties, and also
8131 	 * a chance to reject the mode entirely.
8132 	 */
8133 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
8134 		struct intel_encoder *encoder =
8135 			to_intel_encoder(connector_state->best_encoder);
8136 
8137 		if (connector_state->crtc != crtc)
8138 			continue;
8139 
8140 		ret = encoder->compute_config(encoder, pipe_config,
8141 					      connector_state);
8142 		if (ret < 0) {
8143 			if (ret != -EDEADLK)
8144 				drm_dbg_kms(&i915->drm,
8145 					    "Encoder config failure: %d\n",
8146 					    ret);
8147 			return ret;
8148 		}
8149 	}
8150 
8151 	/* Set default port clock if not overwritten by the encoder. Needs to be
8152 	 * done afterwards in case the encoder adjusts the mode. */
8153 	if (!pipe_config->port_clock)
8154 		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
8155 			* pipe_config->pixel_multiplier;
8156 
8157 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
8158 	if (ret == -EDEADLK)
8159 		return ret;
8160 	if (ret < 0) {
8161 		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
8162 		return ret;
8163 	}
8164 
8165 	if (ret == I915_DISPLAY_CONFIG_RETRY) {
8166 		if (drm_WARN(&i915->drm, !retry,
8167 			     "loop in pipe configuration computation\n"))
8168 			return -EINVAL;
8169 
8170 		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
8171 		retry = false;
8172 		goto encoder_retry;
8173 	}
8174 
8175 	/* Dithering seems to not pass-through bits correctly when it should, so
8176 	 * only enable it on 6bpc panels and when its not a compliance
8177 	 * test requesting 6bpc video pattern.
8178 	 */
8179 	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
8180 		!pipe_config->dither_force_disable;
8181 	drm_dbg_kms(&i915->drm,
8182 		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
8183 		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
8184 
8185 	return 0;
8186 }
8187 
8188 static int
8189 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
8190 {
8191 	struct intel_atomic_state *state =
8192 		to_intel_atomic_state(crtc_state->uapi.state);
8193 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8194 	struct drm_connector_state *conn_state;
8195 	struct drm_connector *connector;
8196 	int i;
8197 
8198 	for_each_new_connector_in_state(&state->base, connector,
8199 					conn_state, i) {
8200 		struct intel_encoder *encoder =
8201 			to_intel_encoder(conn_state->best_encoder);
8202 		int ret;
8203 
8204 		if (conn_state->crtc != &crtc->base ||
8205 		    !encoder->compute_config_late)
8206 			continue;
8207 
8208 		ret = encoder->compute_config_late(encoder, crtc_state,
8209 						   conn_state);
8210 		if (ret)
8211 			return ret;
8212 	}
8213 
8214 	return 0;
8215 }
8216 
8217 bool intel_fuzzy_clock_check(int clock1, int clock2)
8218 {
8219 	int diff;
8220 
8221 	if (clock1 == clock2)
8222 		return true;
8223 
8224 	if (!clock1 || !clock2)
8225 		return false;
8226 
8227 	diff = abs(clock1 - clock2);
8228 
8229 	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
8230 		return true;
8231 
8232 	return false;
8233 }
8234 
8235 static bool
8236 intel_compare_m_n(unsigned int m, unsigned int n,
8237 		  unsigned int m2, unsigned int n2,
8238 		  bool exact)
8239 {
8240 	if (m == m2 && n == n2)
8241 		return true;
8242 
8243 	if (exact || !m || !n || !m2 || !n2)
8244 		return false;
8245 
8246 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
8247 
8248 	if (n > n2) {
8249 		while (n > n2) {
8250 			m2 <<= 1;
8251 			n2 <<= 1;
8252 		}
8253 	} else if (n < n2) {
8254 		while (n < n2) {
8255 			m <<= 1;
8256 			n <<= 1;
8257 		}
8258 	}
8259 
8260 	if (n != n2)
8261 		return false;
8262 
8263 	return intel_fuzzy_clock_check(m, m2);
8264 }
8265 
8266 static bool
8267 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
8268 		       const struct intel_link_m_n *m2_n2,
8269 		       bool exact)
8270 {
8271 	return m_n->tu == m2_n2->tu &&
8272 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
8273 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
8274 		intel_compare_m_n(m_n->link_m, m_n->link_n,
8275 				  m2_n2->link_m, m2_n2->link_n, exact);
8276 }
8277 
8278 static bool
8279 intel_compare_infoframe(const union hdmi_infoframe *a,
8280 			const union hdmi_infoframe *b)
8281 {
8282 	return memcmp(a, b, sizeof(*a)) == 0;
8283 }
8284 
8285 static bool
8286 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
8287 			 const struct drm_dp_vsc_sdp *b)
8288 {
8289 	return memcmp(a, b, sizeof(*a)) == 0;
8290 }
8291 
8292 static void
8293 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
8294 			       bool fastset, const char *name,
8295 			       const union hdmi_infoframe *a,
8296 			       const union hdmi_infoframe *b)
8297 {
8298 	if (fastset) {
8299 		if (!drm_debug_enabled(DRM_UT_KMS))
8300 			return;
8301 
8302 		drm_dbg_kms(&dev_priv->drm,
8303 			    "fastset mismatch in %s infoframe\n", name);
8304 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
8305 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
8306 		drm_dbg_kms(&dev_priv->drm, "found:\n");
8307 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
8308 	} else {
8309 		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
8310 		drm_err(&dev_priv->drm, "expected:\n");
8311 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
8312 		drm_err(&dev_priv->drm, "found:\n");
8313 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
8314 	}
8315 }
8316 
8317 static void
8318 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
8319 				bool fastset, const char *name,
8320 				const struct drm_dp_vsc_sdp *a,
8321 				const struct drm_dp_vsc_sdp *b)
8322 {
8323 	if (fastset) {
8324 		if (!drm_debug_enabled(DRM_UT_KMS))
8325 			return;
8326 
8327 		drm_dbg_kms(&dev_priv->drm,
8328 			    "fastset mismatch in %s dp sdp\n", name);
8329 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
8330 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
8331 		drm_dbg_kms(&dev_priv->drm, "found:\n");
8332 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
8333 	} else {
8334 		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
8335 		drm_err(&dev_priv->drm, "expected:\n");
8336 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
8337 		drm_err(&dev_priv->drm, "found:\n");
8338 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
8339 	}
8340 }
8341 
8342 static void __printf(4, 5)
8343 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
8344 		     const char *name, const char *format, ...)
8345 {
8346 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
8347 	struct va_format vaf;
8348 	va_list args;
8349 
8350 	va_start(args, format);
8351 	vaf.fmt = format;
8352 	vaf.va = &args;
8353 
8354 	if (fastset)
8355 		drm_dbg_kms(&i915->drm,
8356 			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
8357 			    crtc->base.base.id, crtc->base.name, name, &vaf);
8358 	else
8359 		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
8360 			crtc->base.base.id, crtc->base.name, name, &vaf);
8361 
8362 	va_end(args);
8363 }
8364 
8365 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
8366 {
8367 	if (dev_priv->params.fastboot != -1)
8368 		return dev_priv->params.fastboot;
8369 
8370 	/* Enable fastboot by default on Skylake and newer */
8371 	if (DISPLAY_VER(dev_priv) >= 9)
8372 		return true;
8373 
8374 	/* Enable fastboot by default on VLV and CHV */
8375 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8376 		return true;
8377 
8378 	/* Disabled by default on all others */
8379 	return false;
8380 }
8381 
8382 static bool
8383 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
8384 			  const struct intel_crtc_state *pipe_config,
8385 			  bool fastset)
8386 {
8387 	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
8388 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8389 	bool ret = true;
8390 	u32 bp_gamma = 0;
8391 	bool fixup_inherited = fastset &&
8392 		current_config->inherited && !pipe_config->inherited;
8393 
8394 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
8395 		drm_dbg_kms(&dev_priv->drm,
8396 			    "initial modeset and fastboot not set\n");
8397 		ret = false;
8398 	}
8399 
8400 #define PIPE_CONF_CHECK_X(name) do { \
8401 	if (current_config->name != pipe_config->name) { \
8402 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8403 				     "(expected 0x%08x, found 0x%08x)", \
8404 				     current_config->name, \
8405 				     pipe_config->name); \
8406 		ret = false; \
8407 	} \
8408 } while (0)
8409 
8410 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
8411 	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
8412 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8413 				     "(expected 0x%08x, found 0x%08x)", \
8414 				     current_config->name & (mask), \
8415 				     pipe_config->name & (mask)); \
8416 		ret = false; \
8417 	} \
8418 } while (0)
8419 
8420 #define PIPE_CONF_CHECK_I(name) do { \
8421 	if (current_config->name != pipe_config->name) { \
8422 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8423 				     "(expected %i, found %i)", \
8424 				     current_config->name, \
8425 				     pipe_config->name); \
8426 		ret = false; \
8427 	} \
8428 } while (0)
8429 
8430 #define PIPE_CONF_CHECK_BOOL(name) do { \
8431 	if (current_config->name != pipe_config->name) { \
8432 		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
8433 				     "(expected %s, found %s)", \
8434 				     yesno(current_config->name), \
8435 				     yesno(pipe_config->name)); \
8436 		ret = false; \
8437 	} \
8438 } while (0)
8439 
8440 /*
8441  * Checks state where we only read out the enabling, but not the entire
8442  * state itself (like full infoframes or ELD for audio). These states
8443  * require a full modeset on bootup to fix up.
8444  */
8445 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8446 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8447 		PIPE_CONF_CHECK_BOOL(name); \
8448 	} else { \
8449 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8450 				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8451 				     yesno(current_config->name), \
8452 				     yesno(pipe_config->name)); \
8453 		ret = false; \
8454 	} \
8455 } while (0)
8456 
8457 #define PIPE_CONF_CHECK_P(name) do { \
8458 	if (current_config->name != pipe_config->name) { \
8459 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8460 				     "(expected %p, found %p)", \
8461 				     current_config->name, \
8462 				     pipe_config->name); \
8463 		ret = false; \
8464 	} \
8465 } while (0)
8466 
8467 #define PIPE_CONF_CHECK_M_N(name) do { \
8468 	if (!intel_compare_link_m_n(&current_config->name, \
8469 				    &pipe_config->name,\
8470 				    !fastset)) { \
8471 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8472 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8473 				     "found tu %i, gmch %i/%i link %i/%i)", \
8474 				     current_config->name.tu, \
8475 				     current_config->name.gmch_m, \
8476 				     current_config->name.gmch_n, \
8477 				     current_config->name.link_m, \
8478 				     current_config->name.link_n, \
8479 				     pipe_config->name.tu, \
8480 				     pipe_config->name.gmch_m, \
8481 				     pipe_config->name.gmch_n, \
8482 				     pipe_config->name.link_m, \
8483 				     pipe_config->name.link_n); \
8484 		ret = false; \
8485 	} \
8486 } while (0)
8487 
8488 /* This is required for BDW+ where there is only one set of registers for
8489  * switching between high and low RR.
8490  * This macro can be used whenever a comparison has to be made between one
8491  * hw state and multiple sw state variables.
8492  */
8493 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
8494 	if (!intel_compare_link_m_n(&current_config->name, \
8495 				    &pipe_config->name, !fastset) && \
8496 	    !intel_compare_link_m_n(&current_config->alt_name, \
8497 				    &pipe_config->name, !fastset)) { \
8498 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8499 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8500 				     "or tu %i gmch %i/%i link %i/%i, " \
8501 				     "found tu %i, gmch %i/%i link %i/%i)", \
8502 				     current_config->name.tu, \
8503 				     current_config->name.gmch_m, \
8504 				     current_config->name.gmch_n, \
8505 				     current_config->name.link_m, \
8506 				     current_config->name.link_n, \
8507 				     current_config->alt_name.tu, \
8508 				     current_config->alt_name.gmch_m, \
8509 				     current_config->alt_name.gmch_n, \
8510 				     current_config->alt_name.link_m, \
8511 				     current_config->alt_name.link_n, \
8512 				     pipe_config->name.tu, \
8513 				     pipe_config->name.gmch_m, \
8514 				     pipe_config->name.gmch_n, \
8515 				     pipe_config->name.link_m, \
8516 				     pipe_config->name.link_n); \
8517 		ret = false; \
8518 	} \
8519 } while (0)
8520 
8521 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8522 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
8523 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8524 				     "(%x) (expected %i, found %i)", \
8525 				     (mask), \
8526 				     current_config->name & (mask), \
8527 				     pipe_config->name & (mask)); \
8528 		ret = false; \
8529 	} \
8530 } while (0)
8531 
8532 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8533 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8534 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8535 				     "(expected %i, found %i)", \
8536 				     current_config->name, \
8537 				     pipe_config->name); \
8538 		ret = false; \
8539 	} \
8540 } while (0)
8541 
8542 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8543 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
8544 				     &pipe_config->infoframes.name)) { \
8545 		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8546 					       &current_config->infoframes.name, \
8547 					       &pipe_config->infoframes.name); \
8548 		ret = false; \
8549 	} \
8550 } while (0)
8551 
8552 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8553 	if (!current_config->has_psr && !pipe_config->has_psr && \
8554 	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
8555 				      &pipe_config->infoframes.name)) { \
8556 		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8557 						&current_config->infoframes.name, \
8558 						&pipe_config->infoframes.name); \
8559 		ret = false; \
8560 	} \
8561 } while (0)
8562 
8563 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8564 	if (current_config->name1 != pipe_config->name1) { \
8565 		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8566 				"(expected %i, found %i, won't compare lut values)", \
8567 				current_config->name1, \
8568 				pipe_config->name1); \
8569 		ret = false;\
8570 	} else { \
8571 		if (!intel_color_lut_equal(current_config->name2, \
8572 					pipe_config->name2, pipe_config->name1, \
8573 					bit_precision)) { \
8574 			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8575 					"hw_state doesn't match sw_state"); \
8576 			ret = false; \
8577 		} \
8578 	} \
8579 } while (0)
8580 
8581 #define PIPE_CONF_QUIRK(quirk) \
8582 	((current_config->quirks | pipe_config->quirks) & (quirk))
8583 
8584 	PIPE_CONF_CHECK_I(cpu_transcoder);
8585 
8586 	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8587 	PIPE_CONF_CHECK_I(fdi_lanes);
8588 	PIPE_CONF_CHECK_M_N(fdi_m_n);
8589 
8590 	PIPE_CONF_CHECK_I(lane_count);
8591 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8592 
8593 	if (DISPLAY_VER(dev_priv) < 8) {
8594 		PIPE_CONF_CHECK_M_N(dp_m_n);
8595 
8596 		if (current_config->has_drrs)
8597 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
8598 	} else
8599 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8600 
8601 	PIPE_CONF_CHECK_X(output_types);
8602 
8603 	/* FIXME do the readout properly and get rid of this quirk */
8604 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8605 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8606 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8607 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8608 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8609 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8610 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8611 
8612 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8613 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8614 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8615 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8616 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8617 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8618 
8619 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8620 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8621 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8622 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8623 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8624 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8625 
8626 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8627 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8628 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8629 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8630 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8631 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8632 
8633 		PIPE_CONF_CHECK_I(pixel_multiplier);
8634 
8635 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8636 				      DRM_MODE_FLAG_INTERLACE);
8637 
8638 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8639 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8640 					      DRM_MODE_FLAG_PHSYNC);
8641 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8642 					      DRM_MODE_FLAG_NHSYNC);
8643 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8644 					      DRM_MODE_FLAG_PVSYNC);
8645 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8646 					      DRM_MODE_FLAG_NVSYNC);
8647 		}
8648 	}
8649 
8650 	PIPE_CONF_CHECK_I(output_format);
8651 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
8652 	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
8653 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8654 		PIPE_CONF_CHECK_BOOL(limited_color_range);
8655 
8656 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
8657 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
8658 	PIPE_CONF_CHECK_BOOL(has_infoframe);
8659 	/* FIXME do the readout properly and get rid of this quirk */
8660 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8661 		PIPE_CONF_CHECK_BOOL(fec_enable);
8662 
8663 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
8664 
8665 	PIPE_CONF_CHECK_X(gmch_pfit.control);
8666 	/* pfit ratios are autocomputed by the hw on gen4+ */
8667 	if (DISPLAY_VER(dev_priv) < 4)
8668 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
8669 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
8670 
8671 	/*
8672 	 * Changing the EDP transcoder input mux
8673 	 * (A_ONOFF vs. A_ON) requires a full modeset.
8674 	 */
8675 	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
8676 
8677 	if (!fastset) {
8678 		PIPE_CONF_CHECK_I(pipe_src_w);
8679 		PIPE_CONF_CHECK_I(pipe_src_h);
8680 
8681 		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
8682 		if (current_config->pch_pfit.enabled) {
8683 			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
8684 			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
8685 			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
8686 			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
8687 		}
8688 
8689 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
8690 		/* FIXME do the readout properly and get rid of this quirk */
8691 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8692 			PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
8693 
8694 		PIPE_CONF_CHECK_X(gamma_mode);
8695 		if (IS_CHERRYVIEW(dev_priv))
8696 			PIPE_CONF_CHECK_X(cgm_mode);
8697 		else
8698 			PIPE_CONF_CHECK_X(csc_mode);
8699 		PIPE_CONF_CHECK_BOOL(gamma_enable);
8700 		PIPE_CONF_CHECK_BOOL(csc_enable);
8701 
8702 		PIPE_CONF_CHECK_I(linetime);
8703 		PIPE_CONF_CHECK_I(ips_linetime);
8704 
8705 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
8706 		if (bp_gamma)
8707 			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
8708 
8709 		PIPE_CONF_CHECK_BOOL(has_psr);
8710 		PIPE_CONF_CHECK_BOOL(has_psr2);
8711 		PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
8712 		PIPE_CONF_CHECK_I(dc3co_exitline);
8713 	}
8714 
8715 	PIPE_CONF_CHECK_BOOL(double_wide);
8716 
8717 	if (dev_priv->dpll.mgr)
8718 		PIPE_CONF_CHECK_P(shared_dpll);
8719 
8720 	/* FIXME do the readout properly and get rid of this quirk */
8721 	if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8722 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8723 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8724 		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8725 		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8726 		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
8727 		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
8728 		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
8729 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
8730 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
8731 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
8732 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
8733 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
8734 		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
8735 		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
8736 		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
8737 		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
8738 		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
8739 		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
8740 		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
8741 		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
8742 		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
8743 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
8744 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
8745 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
8746 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
8747 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
8748 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
8749 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
8750 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
8751 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
8752 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
8753 	}
8754 
8755 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8756 		PIPE_CONF_CHECK_X(dsi_pll.ctrl);
8757 		PIPE_CONF_CHECK_X(dsi_pll.div);
8758 
8759 		if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
8760 			PIPE_CONF_CHECK_I(pipe_bpp);
8761 
8762 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
8763 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
8764 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8765 
8766 		PIPE_CONF_CHECK_I(min_voltage_level);
8767 	}
8768 
8769 	if (fastset && (current_config->has_psr || pipe_config->has_psr))
8770 		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
8771 					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
8772 	else
8773 		PIPE_CONF_CHECK_X(infoframes.enable);
8774 
8775 	PIPE_CONF_CHECK_X(infoframes.gcp);
8776 	PIPE_CONF_CHECK_INFOFRAME(avi);
8777 	PIPE_CONF_CHECK_INFOFRAME(spd);
8778 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
8779 	PIPE_CONF_CHECK_INFOFRAME(drm);
8780 	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
8781 
8782 	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
8783 	PIPE_CONF_CHECK_I(master_transcoder);
8784 	PIPE_CONF_CHECK_BOOL(bigjoiner);
8785 	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
8786 	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
8787 
8788 	PIPE_CONF_CHECK_I(dsc.compression_enable);
8789 	PIPE_CONF_CHECK_I(dsc.dsc_split);
8790 	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
8791 
8792 	PIPE_CONF_CHECK_BOOL(splitter.enable);
8793 	PIPE_CONF_CHECK_I(splitter.link_count);
8794 	PIPE_CONF_CHECK_I(splitter.pixel_overlap);
8795 
8796 	PIPE_CONF_CHECK_I(mst_master_transcoder);
8797 
8798 	PIPE_CONF_CHECK_BOOL(vrr.enable);
8799 	PIPE_CONF_CHECK_I(vrr.vmin);
8800 	PIPE_CONF_CHECK_I(vrr.vmax);
8801 	PIPE_CONF_CHECK_I(vrr.flipline);
8802 	PIPE_CONF_CHECK_I(vrr.pipeline_full);
8803 	PIPE_CONF_CHECK_I(vrr.guardband);
8804 
8805 #undef PIPE_CONF_CHECK_X
8806 #undef PIPE_CONF_CHECK_I
8807 #undef PIPE_CONF_CHECK_BOOL
8808 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8809 #undef PIPE_CONF_CHECK_P
8810 #undef PIPE_CONF_CHECK_FLAGS
8811 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
8812 #undef PIPE_CONF_CHECK_COLOR_LUT
8813 #undef PIPE_CONF_QUIRK
8814 
8815 	return ret;
8816 }
8817 
8818 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
8819 					   const struct intel_crtc_state *pipe_config)
8820 {
8821 	if (pipe_config->has_pch_encoder) {
8822 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8823 							    &pipe_config->fdi_m_n);
8824 		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
8825 
8826 		/*
8827 		 * FDI already provided one idea for the dotclock.
8828 		 * Yell if the encoder disagrees.
8829 		 */
8830 		drm_WARN(&dev_priv->drm,
8831 			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
8832 			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
8833 			 fdi_dotclock, dotclock);
8834 	}
8835 }
8836 
8837 static void verify_wm_state(struct intel_crtc *crtc,
8838 			    struct intel_crtc_state *new_crtc_state)
8839 {
8840 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8841 	struct skl_hw_state {
8842 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
8843 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
8844 		struct skl_pipe_wm wm;
8845 	} *hw;
8846 	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
8847 	int level, max_level = ilk_wm_max_level(dev_priv);
8848 	struct intel_plane *plane;
8849 	u8 hw_enabled_slices;
8850 
8851 	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
8852 		return;
8853 
8854 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
8855 	if (!hw)
8856 		return;
8857 
8858 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
8859 
8860 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
8861 
8862 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
8863 
8864 	if (DISPLAY_VER(dev_priv) >= 11 &&
8865 	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
8866 		drm_err(&dev_priv->drm,
8867 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
8868 			dev_priv->dbuf.enabled_slices,
8869 			hw_enabled_slices);
8870 
8871 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
8872 		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
8873 		const struct skl_wm_level *hw_wm_level, *sw_wm_level;
8874 
8875 		/* Watermarks */
8876 		for (level = 0; level <= max_level; level++) {
8877 			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
8878 			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
8879 
8880 			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
8881 				continue;
8882 
8883 			drm_err(&dev_priv->drm,
8884 				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8885 				plane->base.base.id, plane->base.name, level,
8886 				sw_wm_level->enable,
8887 				sw_wm_level->blocks,
8888 				sw_wm_level->lines,
8889 				hw_wm_level->enable,
8890 				hw_wm_level->blocks,
8891 				hw_wm_level->lines);
8892 		}
8893 
8894 		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
8895 		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
8896 
8897 		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8898 			drm_err(&dev_priv->drm,
8899 				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8900 				plane->base.base.id, plane->base.name,
8901 				sw_wm_level->enable,
8902 				sw_wm_level->blocks,
8903 				sw_wm_level->lines,
8904 				hw_wm_level->enable,
8905 				hw_wm_level->blocks,
8906 				hw_wm_level->lines);
8907 		}
8908 
8909 		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
8910 		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
8911 
8912 		if (HAS_HW_SAGV_WM(dev_priv) &&
8913 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8914 			drm_err(&dev_priv->drm,
8915 				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8916 				plane->base.base.id, plane->base.name,
8917 				sw_wm_level->enable,
8918 				sw_wm_level->blocks,
8919 				sw_wm_level->lines,
8920 				hw_wm_level->enable,
8921 				hw_wm_level->blocks,
8922 				hw_wm_level->lines);
8923 		}
8924 
8925 		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
8926 		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
8927 
8928 		if (HAS_HW_SAGV_WM(dev_priv) &&
8929 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8930 			drm_err(&dev_priv->drm,
8931 				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8932 				plane->base.base.id, plane->base.name,
8933 				sw_wm_level->enable,
8934 				sw_wm_level->blocks,
8935 				sw_wm_level->lines,
8936 				hw_wm_level->enable,
8937 				hw_wm_level->blocks,
8938 				hw_wm_level->lines);
8939 		}
8940 
8941 		/* DDB */
8942 		hw_ddb_entry = &hw->ddb_y[plane->id];
8943 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
8944 
8945 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
8946 			drm_err(&dev_priv->drm,
8947 				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
8948 				plane->base.base.id, plane->base.name,
8949 				sw_ddb_entry->start, sw_ddb_entry->end,
8950 				hw_ddb_entry->start, hw_ddb_entry->end);
8951 		}
8952 	}
8953 
8954 	kfree(hw);
8955 }
8956 
8957 static void
8958 verify_connector_state(struct intel_atomic_state *state,
8959 		       struct intel_crtc *crtc)
8960 {
8961 	struct drm_connector *connector;
8962 	struct drm_connector_state *new_conn_state;
8963 	int i;
8964 
8965 	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
8966 		struct drm_encoder *encoder = connector->encoder;
8967 		struct intel_crtc_state *crtc_state = NULL;
8968 
8969 		if (new_conn_state->crtc != &crtc->base)
8970 			continue;
8971 
8972 		if (crtc)
8973 			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
8974 
8975 		intel_connector_verify_state(crtc_state, new_conn_state);
8976 
8977 		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
8978 		     "connector's atomic encoder doesn't match legacy encoder\n");
8979 	}
8980 }
8981 
8982 static void
8983 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
8984 {
8985 	struct intel_encoder *encoder;
8986 	struct drm_connector *connector;
8987 	struct drm_connector_state *old_conn_state, *new_conn_state;
8988 	int i;
8989 
8990 	for_each_intel_encoder(&dev_priv->drm, encoder) {
8991 		bool enabled = false, found = false;
8992 		enum pipe pipe;
8993 
8994 		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
8995 			    encoder->base.base.id,
8996 			    encoder->base.name);
8997 
8998 		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
8999 						   new_conn_state, i) {
9000 			if (old_conn_state->best_encoder == &encoder->base)
9001 				found = true;
9002 
9003 			if (new_conn_state->best_encoder != &encoder->base)
9004 				continue;
9005 			found = enabled = true;
9006 
9007 			I915_STATE_WARN(new_conn_state->crtc !=
9008 					encoder->base.crtc,
9009 			     "connector's crtc doesn't match encoder crtc\n");
9010 		}
9011 
9012 		if (!found)
9013 			continue;
9014 
9015 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
9016 		     "encoder's enabled state mismatch "
9017 		     "(expected %i, found %i)\n",
9018 		     !!encoder->base.crtc, enabled);
9019 
9020 		if (!encoder->base.crtc) {
9021 			bool active;
9022 
9023 			active = encoder->get_hw_state(encoder, &pipe);
9024 			I915_STATE_WARN(active,
9025 			     "encoder detached but still enabled on pipe %c.\n",
9026 			     pipe_name(pipe));
9027 		}
9028 	}
9029 }
9030 
9031 static void
9032 verify_crtc_state(struct intel_crtc *crtc,
9033 		  struct intel_crtc_state *old_crtc_state,
9034 		  struct intel_crtc_state *new_crtc_state)
9035 {
9036 	struct drm_device *dev = crtc->base.dev;
9037 	struct drm_i915_private *dev_priv = to_i915(dev);
9038 	struct intel_encoder *encoder;
9039 	struct intel_crtc_state *pipe_config = old_crtc_state;
9040 	struct drm_atomic_state *state = old_crtc_state->uapi.state;
9041 	struct intel_crtc *master = crtc;
9042 
9043 	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
9044 	intel_crtc_free_hw_state(old_crtc_state);
9045 	intel_crtc_state_reset(old_crtc_state, crtc);
9046 	old_crtc_state->uapi.state = state;
9047 
9048 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
9049 		    crtc->base.name);
9050 
9051 	pipe_config->hw.enable = new_crtc_state->hw.enable;
9052 
9053 	intel_crtc_get_pipe_config(pipe_config);
9054 
9055 	/* we keep both pipes enabled on 830 */
9056 	if (IS_I830(dev_priv) && pipe_config->hw.active)
9057 		pipe_config->hw.active = new_crtc_state->hw.active;
9058 
9059 	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
9060 			"crtc active state doesn't match with hw state "
9061 			"(expected %i, found %i)\n",
9062 			new_crtc_state->hw.active, pipe_config->hw.active);
9063 
9064 	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
9065 			"transitional active state does not match atomic hw state "
9066 			"(expected %i, found %i)\n",
9067 			new_crtc_state->hw.active, crtc->active);
9068 
9069 	if (new_crtc_state->bigjoiner_slave)
9070 		master = new_crtc_state->bigjoiner_linked_crtc;
9071 
9072 	for_each_encoder_on_crtc(dev, &master->base, encoder) {
9073 		enum pipe pipe;
9074 		bool active;
9075 
9076 		active = encoder->get_hw_state(encoder, &pipe);
9077 		I915_STATE_WARN(active != new_crtc_state->hw.active,
9078 				"[ENCODER:%i] active %i with crtc active %i\n",
9079 				encoder->base.base.id, active,
9080 				new_crtc_state->hw.active);
9081 
9082 		I915_STATE_WARN(active && master->pipe != pipe,
9083 				"Encoder connected to wrong pipe %c\n",
9084 				pipe_name(pipe));
9085 
9086 		if (active)
9087 			intel_encoder_get_config(encoder, pipe_config);
9088 	}
9089 
9090 	if (!new_crtc_state->hw.active)
9091 		return;
9092 
9093 	if (new_crtc_state->bigjoiner_slave)
9094 		/* No PLLs set for slave */
9095 		pipe_config->shared_dpll = NULL;
9096 
9097 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
9098 
9099 	if (!intel_pipe_config_compare(new_crtc_state,
9100 				       pipe_config, false)) {
9101 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
9102 		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
9103 		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
9104 	}
9105 }
9106 
9107 static void
9108 intel_verify_planes(struct intel_atomic_state *state)
9109 {
9110 	struct intel_plane *plane;
9111 	const struct intel_plane_state *plane_state;
9112 	int i;
9113 
9114 	for_each_new_intel_plane_in_state(state, plane,
9115 					  plane_state, i)
9116 		assert_plane(plane, plane_state->planar_slave ||
9117 			     plane_state->uapi.visible);
9118 }
9119 
9120 static void
9121 verify_single_dpll_state(struct drm_i915_private *dev_priv,
9122 			 struct intel_shared_dpll *pll,
9123 			 struct intel_crtc *crtc,
9124 			 struct intel_crtc_state *new_crtc_state)
9125 {
9126 	struct intel_dpll_hw_state dpll_hw_state;
9127 	u8 pipe_mask;
9128 	bool active;
9129 
9130 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
9131 
9132 	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
9133 
9134 	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
9135 
9136 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
9137 		I915_STATE_WARN(!pll->on && pll->active_mask,
9138 		     "pll in active use but not on in sw tracking\n");
9139 		I915_STATE_WARN(pll->on && !pll->active_mask,
9140 		     "pll is on but not used by any active pipe\n");
9141 		I915_STATE_WARN(pll->on != active,
9142 		     "pll on state mismatch (expected %i, found %i)\n",
9143 		     pll->on, active);
9144 	}
9145 
9146 	if (!crtc) {
9147 		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
9148 				"more active pll users than references: 0x%x vs 0x%x\n",
9149 				pll->active_mask, pll->state.pipe_mask);
9150 
9151 		return;
9152 	}
9153 
9154 	pipe_mask = BIT(crtc->pipe);
9155 
9156 	if (new_crtc_state->hw.active)
9157 		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
9158 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
9159 				pipe_name(crtc->pipe), pll->active_mask);
9160 	else
9161 		I915_STATE_WARN(pll->active_mask & pipe_mask,
9162 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
9163 				pipe_name(crtc->pipe), pll->active_mask);
9164 
9165 	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
9166 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
9167 			pipe_mask, pll->state.pipe_mask);
9168 
9169 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
9170 					  &dpll_hw_state,
9171 					  sizeof(dpll_hw_state)),
9172 			"pll hw state mismatch\n");
9173 }
9174 
9175 static void
9176 verify_shared_dpll_state(struct intel_crtc *crtc,
9177 			 struct intel_crtc_state *old_crtc_state,
9178 			 struct intel_crtc_state *new_crtc_state)
9179 {
9180 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9181 
9182 	if (new_crtc_state->shared_dpll)
9183 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
9184 
9185 	if (old_crtc_state->shared_dpll &&
9186 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
9187 		u8 pipe_mask = BIT(crtc->pipe);
9188 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
9189 
9190 		I915_STATE_WARN(pll->active_mask & pipe_mask,
9191 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
9192 				pipe_name(crtc->pipe), pll->active_mask);
9193 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
9194 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
9195 				pipe_name(crtc->pipe), pll->state.pipe_mask);
9196 	}
9197 }
9198 
9199 static void
9200 verify_mpllb_state(struct intel_atomic_state *state,
9201 		   struct intel_crtc_state *new_crtc_state)
9202 {
9203 	struct drm_i915_private *i915 = to_i915(state->base.dev);
9204 	struct intel_mpllb_state mpllb_hw_state = { 0 };
9205 	struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
9206 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
9207 	struct intel_encoder *encoder;
9208 
9209 	if (!IS_DG2(i915))
9210 		return;
9211 
9212 	if (!new_crtc_state->hw.active)
9213 		return;
9214 
9215 	if (new_crtc_state->bigjoiner_slave)
9216 		return;
9217 
9218 	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
9219 	intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
9220 
9221 #define MPLLB_CHECK(name) do { \
9222 	if (mpllb_sw_state->name != mpllb_hw_state.name) { \
9223 		pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
9224 				     "(expected 0x%08x, found 0x%08x)", \
9225 				     mpllb_sw_state->name, \
9226 				     mpllb_hw_state.name); \
9227 	} \
9228 } while (0)
9229 
9230 	MPLLB_CHECK(mpllb_cp);
9231 	MPLLB_CHECK(mpllb_div);
9232 	MPLLB_CHECK(mpllb_div2);
9233 	MPLLB_CHECK(mpllb_fracn1);
9234 	MPLLB_CHECK(mpllb_fracn2);
9235 	MPLLB_CHECK(mpllb_sscen);
9236 	MPLLB_CHECK(mpllb_sscstep);
9237 
9238 	/*
9239 	 * ref_control is handled by the hardware/firemware and never
9240 	 * programmed by the software, but the proper values are supplied
9241 	 * in the bspec for verification purposes.
9242 	 */
9243 	MPLLB_CHECK(ref_control);
9244 
9245 #undef MPLLB_CHECK
9246 }
9247 
9248 static void
9249 intel_modeset_verify_crtc(struct intel_crtc *crtc,
9250 			  struct intel_atomic_state *state,
9251 			  struct intel_crtc_state *old_crtc_state,
9252 			  struct intel_crtc_state *new_crtc_state)
9253 {
9254 	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
9255 		return;
9256 
9257 	verify_wm_state(crtc, new_crtc_state);
9258 	verify_connector_state(state, crtc);
9259 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
9260 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
9261 	verify_mpllb_state(state, new_crtc_state);
9262 }
9263 
9264 static void
9265 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
9266 {
9267 	int i;
9268 
9269 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
9270 		verify_single_dpll_state(dev_priv,
9271 					 &dev_priv->dpll.shared_dplls[i],
9272 					 NULL, NULL);
9273 }
9274 
9275 static void
9276 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
9277 			      struct intel_atomic_state *state)
9278 {
9279 	verify_encoder_state(dev_priv, state);
9280 	verify_connector_state(state, NULL);
9281 	verify_disabled_dpll_state(dev_priv);
9282 }
9283 
9284 int intel_modeset_all_pipes(struct intel_atomic_state *state)
9285 {
9286 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9287 	struct intel_crtc *crtc;
9288 
9289 	/*
9290 	 * Add all pipes to the state, and force
9291 	 * a modeset on all the active ones.
9292 	 */
9293 	for_each_intel_crtc(&dev_priv->drm, crtc) {
9294 		struct intel_crtc_state *crtc_state;
9295 		int ret;
9296 
9297 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
9298 		if (IS_ERR(crtc_state))
9299 			return PTR_ERR(crtc_state);
9300 
9301 		if (!crtc_state->hw.active ||
9302 		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
9303 			continue;
9304 
9305 		crtc_state->uapi.mode_changed = true;
9306 
9307 		ret = drm_atomic_add_affected_connectors(&state->base,
9308 							 &crtc->base);
9309 		if (ret)
9310 			return ret;
9311 
9312 		ret = intel_atomic_add_affected_planes(state, crtc);
9313 		if (ret)
9314 			return ret;
9315 
9316 		crtc_state->update_planes |= crtc_state->active_planes;
9317 	}
9318 
9319 	return 0;
9320 }
9321 
9322 static void
9323 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
9324 {
9325 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9326 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9327 	struct drm_display_mode adjusted_mode =
9328 		crtc_state->hw.adjusted_mode;
9329 
9330 	if (crtc_state->vrr.enable) {
9331 		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
9332 		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
9333 		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
9334 		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
9335 	}
9336 
9337 	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
9338 
9339 	crtc->mode_flags = crtc_state->mode_flags;
9340 
9341 	/*
9342 	 * The scanline counter increments at the leading edge of hsync.
9343 	 *
9344 	 * On most platforms it starts counting from vtotal-1 on the
9345 	 * first active line. That means the scanline counter value is
9346 	 * always one less than what we would expect. Ie. just after
9347 	 * start of vblank, which also occurs at start of hsync (on the
9348 	 * last active line), the scanline counter will read vblank_start-1.
9349 	 *
9350 	 * On gen2 the scanline counter starts counting from 1 instead
9351 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
9352 	 * to keep the value positive), instead of adding one.
9353 	 *
9354 	 * On HSW+ the behaviour of the scanline counter depends on the output
9355 	 * type. For DP ports it behaves like most other platforms, but on HDMI
9356 	 * there's an extra 1 line difference. So we need to add two instead of
9357 	 * one to the value.
9358 	 *
9359 	 * On VLV/CHV DSI the scanline counter would appear to increment
9360 	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
9361 	 * that means we can't tell whether we're in vblank or not while
9362 	 * we're on that particular line. We must still set scanline_offset
9363 	 * to 1 so that the vblank timestamps come out correct when we query
9364 	 * the scanline counter from within the vblank interrupt handler.
9365 	 * However if queried just before the start of vblank we'll get an
9366 	 * answer that's slightly in the future.
9367 	 */
9368 	if (DISPLAY_VER(dev_priv) == 2) {
9369 		int vtotal;
9370 
9371 		vtotal = adjusted_mode.crtc_vtotal;
9372 		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9373 			vtotal /= 2;
9374 
9375 		crtc->scanline_offset = vtotal - 1;
9376 	} else if (HAS_DDI(dev_priv) &&
9377 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
9378 		crtc->scanline_offset = 2;
9379 	} else {
9380 		crtc->scanline_offset = 1;
9381 	}
9382 }
9383 
9384 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
9385 {
9386 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9387 	struct intel_crtc_state *new_crtc_state;
9388 	struct intel_crtc *crtc;
9389 	int i;
9390 
9391 	if (!dev_priv->display.crtc_compute_clock)
9392 		return;
9393 
9394 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9395 		if (!intel_crtc_needs_modeset(new_crtc_state))
9396 			continue;
9397 
9398 		intel_release_shared_dplls(state, crtc);
9399 	}
9400 }
9401 
9402 /*
9403  * This implements the workaround described in the "notes" section of the mode
9404  * set sequence documentation. When going from no pipes or single pipe to
9405  * multiple pipes, and planes are enabled after the pipe, we need to wait at
9406  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
9407  */
9408 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
9409 {
9410 	struct intel_crtc_state *crtc_state;
9411 	struct intel_crtc *crtc;
9412 	struct intel_crtc_state *first_crtc_state = NULL;
9413 	struct intel_crtc_state *other_crtc_state = NULL;
9414 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
9415 	int i;
9416 
9417 	/* look at all crtc's that are going to be enabled in during modeset */
9418 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9419 		if (!crtc_state->hw.active ||
9420 		    !intel_crtc_needs_modeset(crtc_state))
9421 			continue;
9422 
9423 		if (first_crtc_state) {
9424 			other_crtc_state = crtc_state;
9425 			break;
9426 		} else {
9427 			first_crtc_state = crtc_state;
9428 			first_pipe = crtc->pipe;
9429 		}
9430 	}
9431 
9432 	/* No workaround needed? */
9433 	if (!first_crtc_state)
9434 		return 0;
9435 
9436 	/* w/a possibly needed, check how many crtc's are already enabled. */
9437 	for_each_intel_crtc(state->base.dev, crtc) {
9438 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
9439 		if (IS_ERR(crtc_state))
9440 			return PTR_ERR(crtc_state);
9441 
9442 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
9443 
9444 		if (!crtc_state->hw.active ||
9445 		    intel_crtc_needs_modeset(crtc_state))
9446 			continue;
9447 
9448 		/* 2 or more enabled crtcs means no need for w/a */
9449 		if (enabled_pipe != INVALID_PIPE)
9450 			return 0;
9451 
9452 		enabled_pipe = crtc->pipe;
9453 	}
9454 
9455 	if (enabled_pipe != INVALID_PIPE)
9456 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
9457 	else if (other_crtc_state)
9458 		other_crtc_state->hsw_workaround_pipe = first_pipe;
9459 
9460 	return 0;
9461 }
9462 
9463 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
9464 			   u8 active_pipes)
9465 {
9466 	const struct intel_crtc_state *crtc_state;
9467 	struct intel_crtc *crtc;
9468 	int i;
9469 
9470 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9471 		if (crtc_state->hw.active)
9472 			active_pipes |= BIT(crtc->pipe);
9473 		else
9474 			active_pipes &= ~BIT(crtc->pipe);
9475 	}
9476 
9477 	return active_pipes;
9478 }
9479 
9480 static int intel_modeset_checks(struct intel_atomic_state *state)
9481 {
9482 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9483 
9484 	state->modeset = true;
9485 
9486 	if (IS_HASWELL(dev_priv))
9487 		return hsw_mode_set_planes_workaround(state);
9488 
9489 	return 0;
9490 }
9491 
9492 /*
9493  * Handle calculation of various watermark data at the end of the atomic check
9494  * phase.  The code here should be run after the per-crtc and per-plane 'check'
9495  * handlers to ensure that all derived state has been updated.
9496  */
9497 static int calc_watermark_data(struct intel_atomic_state *state)
9498 {
9499 	struct drm_device *dev = state->base.dev;
9500 	struct drm_i915_private *dev_priv = to_i915(dev);
9501 
9502 	/* Is there platform-specific watermark information to calculate? */
9503 	if (dev_priv->display.compute_global_watermarks)
9504 		return dev_priv->display.compute_global_watermarks(state);
9505 
9506 	return 0;
9507 }
9508 
9509 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
9510 				     struct intel_crtc_state *new_crtc_state)
9511 {
9512 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
9513 		return;
9514 
9515 	new_crtc_state->uapi.mode_changed = false;
9516 	new_crtc_state->update_pipe = true;
9517 }
9518 
9519 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
9520 				    struct intel_crtc_state *new_crtc_state)
9521 {
9522 	/*
9523 	 * If we're not doing the full modeset we want to
9524 	 * keep the current M/N values as they may be
9525 	 * sufficiently different to the computed values
9526 	 * to cause problems.
9527 	 *
9528 	 * FIXME: should really copy more fuzzy state here
9529 	 */
9530 	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
9531 	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
9532 	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
9533 	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
9534 }
9535 
9536 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
9537 					  struct intel_crtc *crtc,
9538 					  u8 plane_ids_mask)
9539 {
9540 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9541 	struct intel_plane *plane;
9542 
9543 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
9544 		struct intel_plane_state *plane_state;
9545 
9546 		if ((plane_ids_mask & BIT(plane->id)) == 0)
9547 			continue;
9548 
9549 		plane_state = intel_atomic_get_plane_state(state, plane);
9550 		if (IS_ERR(plane_state))
9551 			return PTR_ERR(plane_state);
9552 	}
9553 
9554 	return 0;
9555 }
9556 
9557 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
9558 				     struct intel_crtc *crtc)
9559 {
9560 	const struct intel_crtc_state *old_crtc_state =
9561 		intel_atomic_get_old_crtc_state(state, crtc);
9562 	const struct intel_crtc_state *new_crtc_state =
9563 		intel_atomic_get_new_crtc_state(state, crtc);
9564 
9565 	return intel_crtc_add_planes_to_state(state, crtc,
9566 					      old_crtc_state->enabled_planes |
9567 					      new_crtc_state->enabled_planes);
9568 }
9569 
9570 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
9571 {
9572 	/* See {hsw,vlv,ivb}_plane_ratio() */
9573 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
9574 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9575 		IS_IVYBRIDGE(dev_priv);
9576 }
9577 
9578 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
9579 					   struct intel_crtc *crtc,
9580 					   struct intel_crtc *other)
9581 {
9582 	const struct intel_plane_state *plane_state;
9583 	struct intel_plane *plane;
9584 	u8 plane_ids = 0;
9585 	int i;
9586 
9587 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9588 		if (plane->pipe == crtc->pipe)
9589 			plane_ids |= BIT(plane->id);
9590 	}
9591 
9592 	return intel_crtc_add_planes_to_state(state, other, plane_ids);
9593 }
9594 
9595 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9596 {
9597 	const struct intel_crtc_state *crtc_state;
9598 	struct intel_crtc *crtc;
9599 	int i;
9600 
9601 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9602 		int ret;
9603 
9604 		if (!crtc_state->bigjoiner)
9605 			continue;
9606 
9607 		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9608 						      crtc_state->bigjoiner_linked_crtc);
9609 		if (ret)
9610 			return ret;
9611 	}
9612 
9613 	return 0;
9614 }
9615 
9616 static int intel_atomic_check_planes(struct intel_atomic_state *state)
9617 {
9618 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9619 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9620 	struct intel_plane_state *plane_state;
9621 	struct intel_plane *plane;
9622 	struct intel_crtc *crtc;
9623 	int i, ret;
9624 
9625 	ret = icl_add_linked_planes(state);
9626 	if (ret)
9627 		return ret;
9628 
9629 	ret = intel_bigjoiner_add_affected_planes(state);
9630 	if (ret)
9631 		return ret;
9632 
9633 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9634 		ret = intel_plane_atomic_check(state, plane);
9635 		if (ret) {
9636 			drm_dbg_atomic(&dev_priv->drm,
9637 				       "[PLANE:%d:%s] atomic driver check failed\n",
9638 				       plane->base.base.id, plane->base.name);
9639 			return ret;
9640 		}
9641 	}
9642 
9643 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9644 					    new_crtc_state, i) {
9645 		u8 old_active_planes, new_active_planes;
9646 
9647 		ret = icl_check_nv12_planes(new_crtc_state);
9648 		if (ret)
9649 			return ret;
9650 
9651 		/*
9652 		 * On some platforms the number of active planes affects
9653 		 * the planes' minimum cdclk calculation. Add such planes
9654 		 * to the state before we compute the minimum cdclk.
9655 		 */
9656 		if (!active_planes_affects_min_cdclk(dev_priv))
9657 			continue;
9658 
9659 		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9660 		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9661 
9662 		if (hweight8(old_active_planes) == hweight8(new_active_planes))
9663 			continue;
9664 
9665 		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
9666 		if (ret)
9667 			return ret;
9668 	}
9669 
9670 	return 0;
9671 }
9672 
9673 static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
9674 				    bool *need_cdclk_calc)
9675 {
9676 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9677 	const struct intel_cdclk_state *old_cdclk_state;
9678 	const struct intel_cdclk_state *new_cdclk_state;
9679 	struct intel_plane_state *plane_state;
9680 	struct intel_bw_state *new_bw_state;
9681 	struct intel_plane *plane;
9682 	int min_cdclk = 0;
9683 	enum pipe pipe;
9684 	int ret;
9685 	int i;
9686 	/*
9687 	 * active_planes bitmask has been updated, and potentially
9688 	 * affected planes are part of the state. We can now
9689 	 * compute the minimum cdclk for each plane.
9690 	 */
9691 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9692 		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
9693 		if (ret)
9694 			return ret;
9695 	}
9696 
9697 	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
9698 	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
9699 
9700 	if (new_cdclk_state &&
9701 	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
9702 		*need_cdclk_calc = true;
9703 
9704 	ret = dev_priv->display.bw_calc_min_cdclk(state);
9705 	if (ret)
9706 		return ret;
9707 
9708 	new_bw_state = intel_atomic_get_new_bw_state(state);
9709 
9710 	if (!new_cdclk_state || !new_bw_state)
9711 		return 0;
9712 
9713 	for_each_pipe(dev_priv, pipe) {
9714 		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
9715 
9716 		/*
9717 		 * Currently do this change only if we need to increase
9718 		 */
9719 		if (new_bw_state->min_cdclk > min_cdclk)
9720 			*need_cdclk_calc = true;
9721 	}
9722 
9723 	return 0;
9724 }
9725 
9726 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9727 {
9728 	struct intel_crtc_state *crtc_state;
9729 	struct intel_crtc *crtc;
9730 	int i;
9731 
9732 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9733 		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9734 		int ret;
9735 
9736 		ret = intel_crtc_atomic_check(state, crtc);
9737 		if (ret) {
9738 			drm_dbg_atomic(&i915->drm,
9739 				       "[CRTC:%d:%s] atomic driver check failed\n",
9740 				       crtc->base.base.id, crtc->base.name);
9741 			return ret;
9742 		}
9743 	}
9744 
9745 	return 0;
9746 }
9747 
9748 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9749 					       u8 transcoders)
9750 {
9751 	const struct intel_crtc_state *new_crtc_state;
9752 	struct intel_crtc *crtc;
9753 	int i;
9754 
9755 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9756 		if (new_crtc_state->hw.enable &&
9757 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9758 		    intel_crtc_needs_modeset(new_crtc_state))
9759 			return true;
9760 	}
9761 
9762 	return false;
9763 }
9764 
9765 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
9766 					struct intel_crtc *crtc,
9767 					struct intel_crtc_state *old_crtc_state,
9768 					struct intel_crtc_state *new_crtc_state)
9769 {
9770 	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
9771 	struct intel_crtc *slave, *master;
9772 
9773 	/* slave being enabled, is master is still claiming this crtc? */
9774 	if (old_crtc_state->bigjoiner_slave) {
9775 		slave = crtc;
9776 		master = old_crtc_state->bigjoiner_linked_crtc;
9777 		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
9778 		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
9779 			goto claimed;
9780 	}
9781 
9782 	if (!new_crtc_state->bigjoiner)
9783 		return 0;
9784 
9785 	slave = intel_dsc_get_bigjoiner_secondary(crtc);
9786 	if (!slave) {
9787 		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
9788 			      "CRTC + 1 to be used, doesn't exist\n",
9789 			      crtc->base.base.id, crtc->base.name);
9790 		return -EINVAL;
9791 	}
9792 
9793 	new_crtc_state->bigjoiner_linked_crtc = slave;
9794 	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
9795 	master = crtc;
9796 	if (IS_ERR(slave_crtc_state))
9797 		return PTR_ERR(slave_crtc_state);
9798 
9799 	/* master being enabled, slave was already configured? */
9800 	if (slave_crtc_state->uapi.enable)
9801 		goto claimed;
9802 
9803 	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
9804 		      slave->base.base.id, slave->base.name);
9805 
9806 	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
9807 
9808 claimed:
9809 	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
9810 		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
9811 		      slave->base.base.id, slave->base.name,
9812 		      master->base.base.id, master->base.name);
9813 	return -EINVAL;
9814 }
9815 
9816 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
9817 				 struct intel_crtc_state *master_crtc_state)
9818 {
9819 	struct intel_crtc_state *slave_crtc_state =
9820 		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
9821 
9822 	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
9823 	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
9824 	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
9825 	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
9826 }
9827 
9828 /**
9829  * DOC: asynchronous flip implementation
9830  *
9831  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
9832  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
9833  * Correspondingly, support is currently added for primary plane only.
9834  *
9835  * Async flip can only change the plane surface address, so anything else
9836  * changing is rejected from the intel_atomic_check_async() function.
9837  * Once this check is cleared, flip done interrupt is enabled using
9838  * the intel_crtc_enable_flip_done() function.
9839  *
9840  * As soon as the surface address register is written, flip done interrupt is
9841  * generated and the requested events are sent to the usersapce in the interrupt
9842  * handler itself. The timestamp and sequence sent during the flip done event
9843  * correspond to the last vblank and have no relation to the actual time when
9844  * the flip done event was sent.
9845  */
9846 static int intel_atomic_check_async(struct intel_atomic_state *state)
9847 {
9848 	struct drm_i915_private *i915 = to_i915(state->base.dev);
9849 	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9850 	const struct intel_plane_state *new_plane_state, *old_plane_state;
9851 	struct intel_crtc *crtc;
9852 	struct intel_plane *plane;
9853 	int i;
9854 
9855 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9856 					    new_crtc_state, i) {
9857 		if (intel_crtc_needs_modeset(new_crtc_state)) {
9858 			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
9859 			return -EINVAL;
9860 		}
9861 
9862 		if (!new_crtc_state->hw.active) {
9863 			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
9864 			return -EINVAL;
9865 		}
9866 		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
9867 			drm_dbg_kms(&i915->drm,
9868 				    "Active planes cannot be changed during async flip\n");
9869 			return -EINVAL;
9870 		}
9871 	}
9872 
9873 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
9874 					     new_plane_state, i) {
9875 		/*
9876 		 * TODO: Async flip is only supported through the page flip IOCTL
9877 		 * as of now. So support currently added for primary plane only.
9878 		 * Support for other planes on platforms on which supports
9879 		 * this(vlv/chv and icl+) should be added when async flip is
9880 		 * enabled in the atomic IOCTL path.
9881 		 */
9882 		if (!plane->async_flip)
9883 			return -EINVAL;
9884 
9885 		/*
9886 		 * FIXME: This check is kept generic for all platforms.
9887 		 * Need to verify this for all gen9 platforms to enable
9888 		 * this selectively if required.
9889 		 */
9890 		switch (new_plane_state->hw.fb->modifier) {
9891 		case I915_FORMAT_MOD_X_TILED:
9892 		case I915_FORMAT_MOD_Y_TILED:
9893 		case I915_FORMAT_MOD_Yf_TILED:
9894 			break;
9895 		default:
9896 			drm_dbg_kms(&i915->drm,
9897 				    "Linear memory/CCS does not support async flips\n");
9898 			return -EINVAL;
9899 		}
9900 
9901 		if (old_plane_state->view.color_plane[0].stride !=
9902 		    new_plane_state->view.color_plane[0].stride) {
9903 			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
9904 			return -EINVAL;
9905 		}
9906 
9907 		if (old_plane_state->hw.fb->modifier !=
9908 		    new_plane_state->hw.fb->modifier) {
9909 			drm_dbg_kms(&i915->drm,
9910 				    "Framebuffer modifiers cannot be changed in async flip\n");
9911 			return -EINVAL;
9912 		}
9913 
9914 		if (old_plane_state->hw.fb->format !=
9915 		    new_plane_state->hw.fb->format) {
9916 			drm_dbg_kms(&i915->drm,
9917 				    "Framebuffer format cannot be changed in async flip\n");
9918 			return -EINVAL;
9919 		}
9920 
9921 		if (old_plane_state->hw.rotation !=
9922 		    new_plane_state->hw.rotation) {
9923 			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
9924 			return -EINVAL;
9925 		}
9926 
9927 		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
9928 		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
9929 			drm_dbg_kms(&i915->drm,
9930 				    "Plane size/co-ordinates cannot be changed in async flip\n");
9931 			return -EINVAL;
9932 		}
9933 
9934 		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
9935 			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
9936 			return -EINVAL;
9937 		}
9938 
9939 		if (old_plane_state->hw.pixel_blend_mode !=
9940 		    new_plane_state->hw.pixel_blend_mode) {
9941 			drm_dbg_kms(&i915->drm,
9942 				    "Pixel blend mode cannot be changed in async flip\n");
9943 			return -EINVAL;
9944 		}
9945 
9946 		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
9947 			drm_dbg_kms(&i915->drm,
9948 				    "Color encoding cannot be changed in async flip\n");
9949 			return -EINVAL;
9950 		}
9951 
9952 		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
9953 			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
9954 			return -EINVAL;
9955 		}
9956 	}
9957 
9958 	return 0;
9959 }
9960 
9961 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
9962 {
9963 	struct intel_crtc_state *crtc_state;
9964 	struct intel_crtc *crtc;
9965 	int i;
9966 
9967 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9968 		struct intel_crtc_state *linked_crtc_state;
9969 		struct intel_crtc *linked_crtc;
9970 		int ret;
9971 
9972 		if (!crtc_state->bigjoiner)
9973 			continue;
9974 
9975 		linked_crtc = crtc_state->bigjoiner_linked_crtc;
9976 		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
9977 		if (IS_ERR(linked_crtc_state))
9978 			return PTR_ERR(linked_crtc_state);
9979 
9980 		if (!intel_crtc_needs_modeset(crtc_state))
9981 			continue;
9982 
9983 		linked_crtc_state->uapi.mode_changed = true;
9984 
9985 		ret = drm_atomic_add_affected_connectors(&state->base,
9986 							 &linked_crtc->base);
9987 		if (ret)
9988 			return ret;
9989 
9990 		ret = intel_atomic_add_affected_planes(state, linked_crtc);
9991 		if (ret)
9992 			return ret;
9993 	}
9994 
9995 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9996 		/* Kill old bigjoiner link, we may re-establish afterwards */
9997 		if (intel_crtc_needs_modeset(crtc_state) &&
9998 		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
9999 			kill_bigjoiner_slave(state, crtc_state);
10000 	}
10001 
10002 	return 0;
10003 }
10004 
10005 /**
10006  * intel_atomic_check - validate state object
10007  * @dev: drm device
10008  * @_state: state to validate
10009  */
10010 static int intel_atomic_check(struct drm_device *dev,
10011 			      struct drm_atomic_state *_state)
10012 {
10013 	struct drm_i915_private *dev_priv = to_i915(dev);
10014 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
10015 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10016 	struct intel_crtc *crtc;
10017 	int ret, i;
10018 	bool any_ms = false;
10019 
10020 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10021 					    new_crtc_state, i) {
10022 		if (new_crtc_state->inherited != old_crtc_state->inherited)
10023 			new_crtc_state->uapi.mode_changed = true;
10024 	}
10025 
10026 	intel_vrr_check_modeset(state);
10027 
10028 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
10029 	if (ret)
10030 		goto fail;
10031 
10032 	ret = intel_bigjoiner_add_affected_crtcs(state);
10033 	if (ret)
10034 		goto fail;
10035 
10036 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10037 					    new_crtc_state, i) {
10038 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
10039 			/* Light copy */
10040 			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
10041 
10042 			continue;
10043 		}
10044 
10045 		if (!new_crtc_state->uapi.enable) {
10046 			if (!new_crtc_state->bigjoiner_slave) {
10047 				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
10048 				any_ms = true;
10049 			}
10050 			continue;
10051 		}
10052 
10053 		ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
10054 		if (ret)
10055 			goto fail;
10056 
10057 		ret = intel_modeset_pipe_config(state, new_crtc_state);
10058 		if (ret)
10059 			goto fail;
10060 
10061 		ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
10062 						   new_crtc_state);
10063 		if (ret)
10064 			goto fail;
10065 	}
10066 
10067 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10068 					    new_crtc_state, i) {
10069 		if (!intel_crtc_needs_modeset(new_crtc_state))
10070 			continue;
10071 
10072 		ret = intel_modeset_pipe_config_late(new_crtc_state);
10073 		if (ret)
10074 			goto fail;
10075 
10076 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
10077 	}
10078 
10079 	/**
10080 	 * Check if fastset is allowed by external dependencies like other
10081 	 * pipes and transcoders.
10082 	 *
10083 	 * Right now it only forces a fullmodeset when the MST master
10084 	 * transcoder did not changed but the pipe of the master transcoder
10085 	 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
10086 	 * in case of port synced crtcs, if one of the synced crtcs
10087 	 * needs a full modeset, all other synced crtcs should be
10088 	 * forced a full modeset.
10089 	 */
10090 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10091 		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
10092 			continue;
10093 
10094 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
10095 			enum transcoder master = new_crtc_state->mst_master_transcoder;
10096 
10097 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
10098 				new_crtc_state->uapi.mode_changed = true;
10099 				new_crtc_state->update_pipe = false;
10100 			}
10101 		}
10102 
10103 		if (is_trans_port_sync_mode(new_crtc_state)) {
10104 			u8 trans = new_crtc_state->sync_mode_slaves_mask;
10105 
10106 			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
10107 				trans |= BIT(new_crtc_state->master_transcoder);
10108 
10109 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
10110 				new_crtc_state->uapi.mode_changed = true;
10111 				new_crtc_state->update_pipe = false;
10112 			}
10113 		}
10114 
10115 		if (new_crtc_state->bigjoiner) {
10116 			struct intel_crtc_state *linked_crtc_state =
10117 				intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
10118 
10119 			if (intel_crtc_needs_modeset(linked_crtc_state)) {
10120 				new_crtc_state->uapi.mode_changed = true;
10121 				new_crtc_state->update_pipe = false;
10122 			}
10123 		}
10124 	}
10125 
10126 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10127 					    new_crtc_state, i) {
10128 		if (intel_crtc_needs_modeset(new_crtc_state)) {
10129 			any_ms = true;
10130 			continue;
10131 		}
10132 
10133 		if (!new_crtc_state->update_pipe)
10134 			continue;
10135 
10136 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
10137 	}
10138 
10139 	if (any_ms && !check_digital_port_conflicts(state)) {
10140 		drm_dbg_kms(&dev_priv->drm,
10141 			    "rejecting conflicting digital port configuration\n");
10142 		ret = -EINVAL;
10143 		goto fail;
10144 	}
10145 
10146 	ret = drm_dp_mst_atomic_check(&state->base);
10147 	if (ret)
10148 		goto fail;
10149 
10150 	ret = intel_atomic_check_planes(state);
10151 	if (ret)
10152 		goto fail;
10153 
10154 	intel_fbc_choose_crtc(dev_priv, state);
10155 	ret = calc_watermark_data(state);
10156 	if (ret)
10157 		goto fail;
10158 
10159 	ret = intel_bw_atomic_check(state);
10160 	if (ret)
10161 		goto fail;
10162 
10163 	ret = intel_atomic_check_cdclk(state, &any_ms);
10164 	if (ret)
10165 		goto fail;
10166 
10167 	if (intel_any_crtc_needs_modeset(state))
10168 		any_ms = true;
10169 
10170 	if (any_ms) {
10171 		ret = intel_modeset_checks(state);
10172 		if (ret)
10173 			goto fail;
10174 
10175 		ret = intel_modeset_calc_cdclk(state);
10176 		if (ret)
10177 			return ret;
10178 
10179 		intel_modeset_clear_plls(state);
10180 	}
10181 
10182 	ret = intel_atomic_check_crtcs(state);
10183 	if (ret)
10184 		goto fail;
10185 
10186 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10187 					    new_crtc_state, i) {
10188 		if (new_crtc_state->uapi.async_flip) {
10189 			ret = intel_atomic_check_async(state);
10190 			if (ret)
10191 				goto fail;
10192 		}
10193 
10194 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
10195 		    !new_crtc_state->update_pipe)
10196 			continue;
10197 
10198 		intel_dump_pipe_config(new_crtc_state, state,
10199 				       intel_crtc_needs_modeset(new_crtc_state) ?
10200 				       "[modeset]" : "[fastset]");
10201 	}
10202 
10203 	return 0;
10204 
10205  fail:
10206 	if (ret == -EDEADLK)
10207 		return ret;
10208 
10209 	/*
10210 	 * FIXME would probably be nice to know which crtc specifically
10211 	 * caused the failure, in cases where we can pinpoint it.
10212 	 */
10213 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10214 					    new_crtc_state, i)
10215 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
10216 
10217 	return ret;
10218 }
10219 
10220 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
10221 {
10222 	struct intel_crtc_state *crtc_state;
10223 	struct intel_crtc *crtc;
10224 	int i, ret;
10225 
10226 	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
10227 	if (ret < 0)
10228 		return ret;
10229 
10230 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10231 		bool mode_changed = intel_crtc_needs_modeset(crtc_state);
10232 
10233 		if (mode_changed || crtc_state->update_pipe ||
10234 		    crtc_state->uapi.color_mgmt_changed) {
10235 			intel_dsb_prepare(crtc_state);
10236 		}
10237 	}
10238 
10239 	return 0;
10240 }
10241 
10242 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
10243 				  struct intel_crtc_state *crtc_state)
10244 {
10245 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10246 
10247 	if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
10248 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10249 
10250 	if (crtc_state->has_pch_encoder) {
10251 		enum pipe pch_transcoder =
10252 			intel_crtc_pch_transcoder(crtc);
10253 
10254 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
10255 	}
10256 }
10257 
10258 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
10259 			       const struct intel_crtc_state *new_crtc_state)
10260 {
10261 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
10262 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10263 
10264 	/*
10265 	 * Update pipe size and adjust fitter if needed: the reason for this is
10266 	 * that in compute_mode_changes we check the native mode (not the pfit
10267 	 * mode) to see if we can flip rather than do a full mode set. In the
10268 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
10269 	 * pfit state, we'll end up with a big fb scanned out into the wrong
10270 	 * sized surface.
10271 	 */
10272 	intel_set_pipe_src_size(new_crtc_state);
10273 
10274 	/* on skylake this is done by detaching scalers */
10275 	if (DISPLAY_VER(dev_priv) >= 9) {
10276 		if (new_crtc_state->pch_pfit.enabled)
10277 			skl_pfit_enable(new_crtc_state);
10278 	} else if (HAS_PCH_SPLIT(dev_priv)) {
10279 		if (new_crtc_state->pch_pfit.enabled)
10280 			ilk_pfit_enable(new_crtc_state);
10281 		else if (old_crtc_state->pch_pfit.enabled)
10282 			ilk_pfit_disable(old_crtc_state);
10283 	}
10284 
10285 	/*
10286 	 * The register is supposedly single buffered so perhaps
10287 	 * not 100% correct to do this here. But SKL+ calculate
10288 	 * this based on the adjust pixel rate so pfit changes do
10289 	 * affect it and so it must be updated for fastsets.
10290 	 * HSW/BDW only really need this here for fastboot, after
10291 	 * that the value should not change without a full modeset.
10292 	 */
10293 	if (DISPLAY_VER(dev_priv) >= 9 ||
10294 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
10295 		hsw_set_linetime_wm(new_crtc_state);
10296 
10297 	if (DISPLAY_VER(dev_priv) >= 11)
10298 		icl_set_pipe_chicken(new_crtc_state);
10299 }
10300 
10301 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
10302 				   struct intel_crtc *crtc)
10303 {
10304 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10305 	const struct intel_crtc_state *old_crtc_state =
10306 		intel_atomic_get_old_crtc_state(state, crtc);
10307 	const struct intel_crtc_state *new_crtc_state =
10308 		intel_atomic_get_new_crtc_state(state, crtc);
10309 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10310 
10311 	/*
10312 	 * During modesets pipe configuration was programmed as the
10313 	 * CRTC was enabled.
10314 	 */
10315 	if (!modeset) {
10316 		if (new_crtc_state->uapi.color_mgmt_changed ||
10317 		    new_crtc_state->update_pipe)
10318 			intel_color_commit(new_crtc_state);
10319 
10320 		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
10321 			bdw_set_pipemisc(new_crtc_state);
10322 
10323 		if (new_crtc_state->update_pipe)
10324 			intel_pipe_fastset(old_crtc_state, new_crtc_state);
10325 
10326 		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
10327 	}
10328 
10329 	if (dev_priv->display.atomic_update_watermarks)
10330 		dev_priv->display.atomic_update_watermarks(state, crtc);
10331 }
10332 
10333 static void commit_pipe_post_planes(struct intel_atomic_state *state,
10334 				    struct intel_crtc *crtc)
10335 {
10336 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10337 	const struct intel_crtc_state *new_crtc_state =
10338 		intel_atomic_get_new_crtc_state(state, crtc);
10339 
10340 	/*
10341 	 * Disable the scaler(s) after the plane(s) so that we don't
10342 	 * get a catastrophic underrun even if the two operations
10343 	 * end up happening in two different frames.
10344 	 */
10345 	if (DISPLAY_VER(dev_priv) >= 9 &&
10346 	    !intel_crtc_needs_modeset(new_crtc_state))
10347 		skl_detach_scalers(new_crtc_state);
10348 }
10349 
10350 static void intel_enable_crtc(struct intel_atomic_state *state,
10351 			      struct intel_crtc *crtc)
10352 {
10353 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10354 	const struct intel_crtc_state *new_crtc_state =
10355 		intel_atomic_get_new_crtc_state(state, crtc);
10356 
10357 	if (!intel_crtc_needs_modeset(new_crtc_state))
10358 		return;
10359 
10360 	intel_crtc_update_active_timings(new_crtc_state);
10361 
10362 	dev_priv->display.crtc_enable(state, crtc);
10363 
10364 	if (new_crtc_state->bigjoiner_slave)
10365 		return;
10366 
10367 	/* vblanks work again, re-enable pipe CRC. */
10368 	intel_crtc_enable_pipe_crc(crtc);
10369 }
10370 
10371 static void intel_update_crtc(struct intel_atomic_state *state,
10372 			      struct intel_crtc *crtc)
10373 {
10374 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10375 	const struct intel_crtc_state *old_crtc_state =
10376 		intel_atomic_get_old_crtc_state(state, crtc);
10377 	struct intel_crtc_state *new_crtc_state =
10378 		intel_atomic_get_new_crtc_state(state, crtc);
10379 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10380 
10381 	if (!modeset) {
10382 		if (new_crtc_state->preload_luts &&
10383 		    (new_crtc_state->uapi.color_mgmt_changed ||
10384 		     new_crtc_state->update_pipe))
10385 			intel_color_load_luts(new_crtc_state);
10386 
10387 		intel_pre_plane_update(state, crtc);
10388 
10389 		if (new_crtc_state->update_pipe)
10390 			intel_encoders_update_pipe(state, crtc);
10391 	}
10392 
10393 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
10394 		intel_fbc_disable(crtc);
10395 	else
10396 		intel_fbc_enable(state, crtc);
10397 
10398 	/* Perform vblank evasion around commit operation */
10399 	intel_pipe_update_start(new_crtc_state);
10400 
10401 	commit_pipe_pre_planes(state, crtc);
10402 
10403 	if (DISPLAY_VER(dev_priv) >= 9)
10404 		skl_update_planes_on_crtc(state, crtc);
10405 	else
10406 		i9xx_update_planes_on_crtc(state, crtc);
10407 
10408 	commit_pipe_post_planes(state, crtc);
10409 
10410 	intel_pipe_update_end(new_crtc_state);
10411 
10412 	/*
10413 	 * We usually enable FIFO underrun interrupts as part of the
10414 	 * CRTC enable sequence during modesets.  But when we inherit a
10415 	 * valid pipe configuration from the BIOS we need to take care
10416 	 * of enabling them on the CRTC's first fastset.
10417 	 */
10418 	if (new_crtc_state->update_pipe && !modeset &&
10419 	    old_crtc_state->inherited)
10420 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
10421 }
10422 
10423 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
10424 					  struct intel_crtc_state *old_crtc_state,
10425 					  struct intel_crtc_state *new_crtc_state,
10426 					  struct intel_crtc *crtc)
10427 {
10428 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10429 
10430 	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
10431 
10432 	intel_encoders_pre_disable(state, crtc);
10433 
10434 	intel_crtc_disable_planes(state, crtc);
10435 
10436 	/*
10437 	 * We still need special handling for disabling bigjoiner master
10438 	 * and slaves since for slave we do not have encoder or plls
10439 	 * so we dont need to disable those.
10440 	 */
10441 	if (old_crtc_state->bigjoiner) {
10442 		intel_crtc_disable_planes(state,
10443 					  old_crtc_state->bigjoiner_linked_crtc);
10444 		old_crtc_state->bigjoiner_linked_crtc->active = false;
10445 	}
10446 
10447 	/*
10448 	 * We need to disable pipe CRC before disabling the pipe,
10449 	 * or we race against vblank off.
10450 	 */
10451 	intel_crtc_disable_pipe_crc(crtc);
10452 
10453 	dev_priv->display.crtc_disable(state, crtc);
10454 	crtc->active = false;
10455 	intel_fbc_disable(crtc);
10456 	intel_disable_shared_dpll(old_crtc_state);
10457 
10458 	/* FIXME unify this for all platforms */
10459 	if (!new_crtc_state->hw.active &&
10460 	    !HAS_GMCH(dev_priv) &&
10461 	    dev_priv->display.initial_watermarks)
10462 		dev_priv->display.initial_watermarks(state, crtc);
10463 }
10464 
10465 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
10466 {
10467 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10468 	struct intel_crtc *crtc;
10469 	u32 handled = 0;
10470 	int i;
10471 
10472 	/* Only disable port sync and MST slaves */
10473 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10474 					    new_crtc_state, i) {
10475 		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
10476 			continue;
10477 
10478 		if (!old_crtc_state->hw.active)
10479 			continue;
10480 
10481 		/* In case of Transcoder port Sync master slave CRTCs can be
10482 		 * assigned in any order and we need to make sure that
10483 		 * slave CRTCs are disabled first and then master CRTC since
10484 		 * Slave vblanks are masked till Master Vblanks.
10485 		 */
10486 		if (!is_trans_port_sync_slave(old_crtc_state) &&
10487 		    !intel_dp_mst_is_slave_trans(old_crtc_state))
10488 			continue;
10489 
10490 		intel_pre_plane_update(state, crtc);
10491 		intel_old_crtc_state_disables(state, old_crtc_state,
10492 					      new_crtc_state, crtc);
10493 		handled |= BIT(crtc->pipe);
10494 	}
10495 
10496 	/* Disable everything else left on */
10497 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10498 					    new_crtc_state, i) {
10499 		if (!intel_crtc_needs_modeset(new_crtc_state) ||
10500 		    (handled & BIT(crtc->pipe)) ||
10501 		    old_crtc_state->bigjoiner_slave)
10502 			continue;
10503 
10504 		intel_pre_plane_update(state, crtc);
10505 		if (old_crtc_state->bigjoiner) {
10506 			struct intel_crtc *slave =
10507 				old_crtc_state->bigjoiner_linked_crtc;
10508 
10509 			intel_pre_plane_update(state, slave);
10510 		}
10511 
10512 		if (old_crtc_state->hw.active)
10513 			intel_old_crtc_state_disables(state, old_crtc_state,
10514 						      new_crtc_state, crtc);
10515 	}
10516 }
10517 
10518 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
10519 {
10520 	struct intel_crtc_state *new_crtc_state;
10521 	struct intel_crtc *crtc;
10522 	int i;
10523 
10524 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10525 		if (!new_crtc_state->hw.active)
10526 			continue;
10527 
10528 		intel_enable_crtc(state, crtc);
10529 		intel_update_crtc(state, crtc);
10530 	}
10531 }
10532 
10533 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
10534 {
10535 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10536 	struct intel_crtc *crtc;
10537 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10538 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
10539 	u8 update_pipes = 0, modeset_pipes = 0;
10540 	int i;
10541 
10542 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10543 		enum pipe pipe = crtc->pipe;
10544 
10545 		if (!new_crtc_state->hw.active)
10546 			continue;
10547 
10548 		/* ignore allocations for crtc's that have been turned off. */
10549 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
10550 			entries[pipe] = old_crtc_state->wm.skl.ddb;
10551 			update_pipes |= BIT(pipe);
10552 		} else {
10553 			modeset_pipes |= BIT(pipe);
10554 		}
10555 	}
10556 
10557 	/*
10558 	 * Whenever the number of active pipes changes, we need to make sure we
10559 	 * update the pipes in the right order so that their ddb allocations
10560 	 * never overlap with each other between CRTC updates. Otherwise we'll
10561 	 * cause pipe underruns and other bad stuff.
10562 	 *
10563 	 * So first lets enable all pipes that do not need a fullmodeset as
10564 	 * those don't have any external dependency.
10565 	 */
10566 	while (update_pipes) {
10567 		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10568 						    new_crtc_state, i) {
10569 			enum pipe pipe = crtc->pipe;
10570 
10571 			if ((update_pipes & BIT(pipe)) == 0)
10572 				continue;
10573 
10574 			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10575 							entries, I915_MAX_PIPES, pipe))
10576 				continue;
10577 
10578 			entries[pipe] = new_crtc_state->wm.skl.ddb;
10579 			update_pipes &= ~BIT(pipe);
10580 
10581 			intel_update_crtc(state, crtc);
10582 
10583 			/*
10584 			 * If this is an already active pipe, it's DDB changed,
10585 			 * and this isn't the last pipe that needs updating
10586 			 * then we need to wait for a vblank to pass for the
10587 			 * new ddb allocation to take effect.
10588 			 */
10589 			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
10590 						 &old_crtc_state->wm.skl.ddb) &&
10591 			    (update_pipes | modeset_pipes))
10592 				intel_wait_for_vblank(dev_priv, pipe);
10593 		}
10594 	}
10595 
10596 	update_pipes = modeset_pipes;
10597 
10598 	/*
10599 	 * Enable all pipes that needs a modeset and do not depends on other
10600 	 * pipes
10601 	 */
10602 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10603 		enum pipe pipe = crtc->pipe;
10604 
10605 		if ((modeset_pipes & BIT(pipe)) == 0)
10606 			continue;
10607 
10608 		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
10609 		    is_trans_port_sync_master(new_crtc_state) ||
10610 		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
10611 			continue;
10612 
10613 		modeset_pipes &= ~BIT(pipe);
10614 
10615 		intel_enable_crtc(state, crtc);
10616 	}
10617 
10618 	/*
10619 	 * Then we enable all remaining pipes that depend on other
10620 	 * pipes: MST slaves and port sync masters, big joiner master
10621 	 */
10622 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10623 		enum pipe pipe = crtc->pipe;
10624 
10625 		if ((modeset_pipes & BIT(pipe)) == 0)
10626 			continue;
10627 
10628 		modeset_pipes &= ~BIT(pipe);
10629 
10630 		intel_enable_crtc(state, crtc);
10631 	}
10632 
10633 	/*
10634 	 * Finally we do the plane updates/etc. for all pipes that got enabled.
10635 	 */
10636 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10637 		enum pipe pipe = crtc->pipe;
10638 
10639 		if ((update_pipes & BIT(pipe)) == 0)
10640 			continue;
10641 
10642 		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10643 									entries, I915_MAX_PIPES, pipe));
10644 
10645 		entries[pipe] = new_crtc_state->wm.skl.ddb;
10646 		update_pipes &= ~BIT(pipe);
10647 
10648 		intel_update_crtc(state, crtc);
10649 	}
10650 
10651 	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
10652 	drm_WARN_ON(&dev_priv->drm, update_pipes);
10653 }
10654 
10655 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10656 {
10657 	struct intel_atomic_state *state, *next;
10658 	struct llist_node *freed;
10659 
10660 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10661 	llist_for_each_entry_safe(state, next, freed, freed)
10662 		drm_atomic_state_put(&state->base);
10663 }
10664 
10665 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10666 {
10667 	struct drm_i915_private *dev_priv =
10668 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10669 
10670 	intel_atomic_helper_free_state(dev_priv);
10671 }
10672 
10673 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
10674 {
10675 	struct wait_queue_entry wait_fence, wait_reset;
10676 	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
10677 
10678 	init_wait_entry(&wait_fence, 0);
10679 	init_wait_entry(&wait_reset, 0);
10680 	for (;;) {
10681 		prepare_to_wait(&intel_state->commit_ready.wait,
10682 				&wait_fence, TASK_UNINTERRUPTIBLE);
10683 		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10684 					      I915_RESET_MODESET),
10685 				&wait_reset, TASK_UNINTERRUPTIBLE);
10686 
10687 
10688 		if (i915_sw_fence_done(&intel_state->commit_ready) ||
10689 		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
10690 			break;
10691 
10692 		schedule();
10693 	}
10694 	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
10695 	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10696 				  I915_RESET_MODESET),
10697 		    &wait_reset);
10698 }
10699 
10700 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
10701 {
10702 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10703 	struct intel_crtc *crtc;
10704 	int i;
10705 
10706 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10707 					    new_crtc_state, i)
10708 		intel_dsb_cleanup(old_crtc_state);
10709 }
10710 
10711 static void intel_atomic_cleanup_work(struct work_struct *work)
10712 {
10713 	struct intel_atomic_state *state =
10714 		container_of(work, struct intel_atomic_state, base.commit_work);
10715 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10716 
10717 	intel_cleanup_dsbs(state);
10718 	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
10719 	drm_atomic_helper_commit_cleanup_done(&state->base);
10720 	drm_atomic_state_put(&state->base);
10721 
10722 	intel_atomic_helper_free_state(i915);
10723 }
10724 
10725 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
10726 {
10727 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10728 	struct intel_plane *plane;
10729 	struct intel_plane_state *plane_state;
10730 	int i;
10731 
10732 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10733 		struct drm_framebuffer *fb = plane_state->hw.fb;
10734 		int ret;
10735 
10736 		if (!fb ||
10737 		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
10738 			continue;
10739 
10740 		/*
10741 		 * The layout of the fast clear color value expected by HW
10742 		 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
10743 		 * - 4 x 4 bytes per-channel value
10744 		 *   (in surface type specific float/int format provided by the fb user)
10745 		 * - 8 bytes native color value used by the display
10746 		 *   (converted/written by GPU during a fast clear operation using the
10747 		 *    above per-channel values)
10748 		 *
10749 		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
10750 		 * caller made sure that the object is synced wrt. the related color clear value
10751 		 * GPU write on it.
10752 		 */
10753 		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
10754 						     fb->offsets[2] + 16,
10755 						     &plane_state->ccval,
10756 						     sizeof(plane_state->ccval));
10757 		/* The above could only fail if the FB obj has an unexpected backing store type. */
10758 		drm_WARN_ON(&i915->drm, ret);
10759 	}
10760 }
10761 
10762 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10763 {
10764 	struct drm_device *dev = state->base.dev;
10765 	struct drm_i915_private *dev_priv = to_i915(dev);
10766 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10767 	struct intel_crtc *crtc;
10768 	u64 put_domains[I915_MAX_PIPES] = {};
10769 	intel_wakeref_t wakeref = 0;
10770 	int i;
10771 
10772 	intel_atomic_commit_fence_wait(state);
10773 
10774 	drm_atomic_helper_wait_for_dependencies(&state->base);
10775 
10776 	if (state->modeset)
10777 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10778 
10779 	intel_atomic_prepare_plane_clear_colors(state);
10780 
10781 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10782 					    new_crtc_state, i) {
10783 		if (intel_crtc_needs_modeset(new_crtc_state) ||
10784 		    new_crtc_state->update_pipe) {
10785 
10786 			put_domains[crtc->pipe] =
10787 				modeset_get_crtc_power_domains(new_crtc_state);
10788 		}
10789 	}
10790 
10791 	intel_commit_modeset_disables(state);
10792 
10793 	/* FIXME: Eventually get rid of our crtc->config pointer */
10794 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10795 		crtc->config = new_crtc_state;
10796 
10797 	if (state->modeset) {
10798 		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10799 
10800 		intel_set_cdclk_pre_plane_update(state);
10801 
10802 		intel_modeset_verify_disabled(dev_priv, state);
10803 	}
10804 
10805 	intel_sagv_pre_plane_update(state);
10806 
10807 	/* Complete the events for pipes that have now been disabled */
10808 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10809 		bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10810 
10811 		/* Complete events for now disable pipes here. */
10812 		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
10813 			spin_lock_irq(&dev->event_lock);
10814 			drm_crtc_send_vblank_event(&crtc->base,
10815 						   new_crtc_state->uapi.event);
10816 			spin_unlock_irq(&dev->event_lock);
10817 
10818 			new_crtc_state->uapi.event = NULL;
10819 		}
10820 	}
10821 
10822 	if (state->modeset)
10823 		intel_encoders_update_prepare(state);
10824 
10825 	intel_dbuf_pre_plane_update(state);
10826 
10827 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10828 		if (new_crtc_state->uapi.async_flip)
10829 			intel_crtc_enable_flip_done(state, crtc);
10830 	}
10831 
10832 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
10833 	dev_priv->display.commit_modeset_enables(state);
10834 
10835 	if (state->modeset) {
10836 		intel_encoders_update_complete(state);
10837 
10838 		intel_set_cdclk_post_plane_update(state);
10839 	}
10840 
10841 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
10842 	 * already, but still need the state for the delayed optimization. To
10843 	 * fix this:
10844 	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
10845 	 * - schedule that vblank worker _before_ calling hw_done
10846 	 * - at the start of commit_tail, cancel it _synchrously
10847 	 * - switch over to the vblank wait helper in the core after that since
10848 	 *   we don't need out special handling any more.
10849 	 */
10850 	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
10851 
10852 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10853 		if (new_crtc_state->uapi.async_flip)
10854 			intel_crtc_disable_flip_done(state, crtc);
10855 
10856 		if (new_crtc_state->hw.active &&
10857 		    !intel_crtc_needs_modeset(new_crtc_state) &&
10858 		    !new_crtc_state->preload_luts &&
10859 		    (new_crtc_state->uapi.color_mgmt_changed ||
10860 		     new_crtc_state->update_pipe))
10861 			intel_color_load_luts(new_crtc_state);
10862 	}
10863 
10864 	/*
10865 	 * Now that the vblank has passed, we can go ahead and program the
10866 	 * optimal watermarks on platforms that need two-step watermark
10867 	 * programming.
10868 	 *
10869 	 * TODO: Move this (and other cleanup) to an async worker eventually.
10870 	 */
10871 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10872 					    new_crtc_state, i) {
10873 		/*
10874 		 * Gen2 reports pipe underruns whenever all planes are disabled.
10875 		 * So re-enable underrun reporting after some planes get enabled.
10876 		 *
10877 		 * We do this before .optimize_watermarks() so that we have a
10878 		 * chance of catching underruns with the intermediate watermarks
10879 		 * vs. the new plane configuration.
10880 		 */
10881 		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
10882 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10883 
10884 		if (dev_priv->display.optimize_watermarks)
10885 			dev_priv->display.optimize_watermarks(state, crtc);
10886 	}
10887 
10888 	intel_dbuf_post_plane_update(state);
10889 
10890 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10891 		intel_post_plane_update(state, crtc);
10892 
10893 		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
10894 
10895 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
10896 
10897 		/*
10898 		 * DSB cleanup is done in cleanup_work aligning with framebuffer
10899 		 * cleanup. So copy and reset the dsb structure to sync with
10900 		 * commit_done and later do dsb cleanup in cleanup_work.
10901 		 */
10902 		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
10903 	}
10904 
10905 	/* Underruns don't always raise interrupts, so check manually */
10906 	intel_check_cpu_fifo_underruns(dev_priv);
10907 	intel_check_pch_fifo_underruns(dev_priv);
10908 
10909 	if (state->modeset)
10910 		intel_verify_planes(state);
10911 
10912 	intel_sagv_post_plane_update(state);
10913 
10914 	drm_atomic_helper_commit_hw_done(&state->base);
10915 
10916 	if (state->modeset) {
10917 		/* As one of the primary mmio accessors, KMS has a high
10918 		 * likelihood of triggering bugs in unclaimed access. After we
10919 		 * finish modesetting, see if an error has been flagged, and if
10920 		 * so enable debugging for the next modeset - and hope we catch
10921 		 * the culprit.
10922 		 */
10923 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
10924 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
10925 	}
10926 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10927 
10928 	/*
10929 	 * Defer the cleanup of the old state to a separate worker to not
10930 	 * impede the current task (userspace for blocking modesets) that
10931 	 * are executed inline. For out-of-line asynchronous modesets/flips,
10932 	 * deferring to a new worker seems overkill, but we would place a
10933 	 * schedule point (cond_resched()) here anyway to keep latencies
10934 	 * down.
10935 	 */
10936 	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
10937 	queue_work(system_highpri_wq, &state->base.commit_work);
10938 }
10939 
10940 static void intel_atomic_commit_work(struct work_struct *work)
10941 {
10942 	struct intel_atomic_state *state =
10943 		container_of(work, struct intel_atomic_state, base.commit_work);
10944 
10945 	intel_atomic_commit_tail(state);
10946 }
10947 
10948 static int __i915_sw_fence_call
10949 intel_atomic_commit_ready(struct i915_sw_fence *fence,
10950 			  enum i915_sw_fence_notify notify)
10951 {
10952 	struct intel_atomic_state *state =
10953 		container_of(fence, struct intel_atomic_state, commit_ready);
10954 
10955 	switch (notify) {
10956 	case FENCE_COMPLETE:
10957 		/* we do blocking waits in the worker, nothing to do here */
10958 		break;
10959 	case FENCE_FREE:
10960 		{
10961 			struct intel_atomic_helper *helper =
10962 				&to_i915(state->base.dev)->atomic_helper;
10963 
10964 			if (llist_add(&state->freed, &helper->free_list))
10965 				schedule_work(&helper->free_work);
10966 			break;
10967 		}
10968 	}
10969 
10970 	return NOTIFY_DONE;
10971 }
10972 
10973 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
10974 {
10975 	struct intel_plane_state *old_plane_state, *new_plane_state;
10976 	struct intel_plane *plane;
10977 	int i;
10978 
10979 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
10980 					     new_plane_state, i)
10981 		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
10982 					to_intel_frontbuffer(new_plane_state->hw.fb),
10983 					plane->frontbuffer_bit);
10984 }
10985 
10986 static int intel_atomic_commit(struct drm_device *dev,
10987 			       struct drm_atomic_state *_state,
10988 			       bool nonblock)
10989 {
10990 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
10991 	struct drm_i915_private *dev_priv = to_i915(dev);
10992 	int ret = 0;
10993 
10994 	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
10995 
10996 	drm_atomic_state_get(&state->base);
10997 	i915_sw_fence_init(&state->commit_ready,
10998 			   intel_atomic_commit_ready);
10999 
11000 	/*
11001 	 * The intel_legacy_cursor_update() fast path takes care
11002 	 * of avoiding the vblank waits for simple cursor
11003 	 * movement and flips. For cursor on/off and size changes,
11004 	 * we want to perform the vblank waits so that watermark
11005 	 * updates happen during the correct frames. Gen9+ have
11006 	 * double buffered watermarks and so shouldn't need this.
11007 	 *
11008 	 * Unset state->legacy_cursor_update before the call to
11009 	 * drm_atomic_helper_setup_commit() because otherwise
11010 	 * drm_atomic_helper_wait_for_flip_done() is a noop and
11011 	 * we get FIFO underruns because we didn't wait
11012 	 * for vblank.
11013 	 *
11014 	 * FIXME doing watermarks and fb cleanup from a vblank worker
11015 	 * (assuming we had any) would solve these problems.
11016 	 */
11017 	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
11018 		struct intel_crtc_state *new_crtc_state;
11019 		struct intel_crtc *crtc;
11020 		int i;
11021 
11022 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
11023 			if (new_crtc_state->wm.need_postvbl_update ||
11024 			    new_crtc_state->update_wm_post)
11025 				state->base.legacy_cursor_update = false;
11026 	}
11027 
11028 	ret = intel_atomic_prepare_commit(state);
11029 	if (ret) {
11030 		drm_dbg_atomic(&dev_priv->drm,
11031 			       "Preparing state failed with %i\n", ret);
11032 		i915_sw_fence_commit(&state->commit_ready);
11033 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
11034 		return ret;
11035 	}
11036 
11037 	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
11038 	if (!ret)
11039 		ret = drm_atomic_helper_swap_state(&state->base, true);
11040 	if (!ret)
11041 		intel_atomic_swap_global_state(state);
11042 
11043 	if (ret) {
11044 		struct intel_crtc_state *new_crtc_state;
11045 		struct intel_crtc *crtc;
11046 		int i;
11047 
11048 		i915_sw_fence_commit(&state->commit_ready);
11049 
11050 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
11051 			intel_dsb_cleanup(new_crtc_state);
11052 
11053 		drm_atomic_helper_cleanup_planes(dev, &state->base);
11054 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
11055 		return ret;
11056 	}
11057 	intel_shared_dpll_swap_state(state);
11058 	intel_atomic_track_fbs(state);
11059 
11060 	drm_atomic_state_get(&state->base);
11061 	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
11062 
11063 	i915_sw_fence_commit(&state->commit_ready);
11064 	if (nonblock && state->modeset) {
11065 		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
11066 	} else if (nonblock) {
11067 		queue_work(dev_priv->flip_wq, &state->base.commit_work);
11068 	} else {
11069 		if (state->modeset)
11070 			flush_workqueue(dev_priv->modeset_wq);
11071 		intel_atomic_commit_tail(state);
11072 	}
11073 
11074 	return 0;
11075 }
11076 
11077 struct wait_rps_boost {
11078 	struct wait_queue_entry wait;
11079 
11080 	struct drm_crtc *crtc;
11081 	struct i915_request *request;
11082 };
11083 
11084 static int do_rps_boost(struct wait_queue_entry *_wait,
11085 			unsigned mode, int sync, void *key)
11086 {
11087 	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
11088 	struct i915_request *rq = wait->request;
11089 
11090 	/*
11091 	 * If we missed the vblank, but the request is already running it
11092 	 * is reasonable to assume that it will complete before the next
11093 	 * vblank without our intervention, so leave RPS alone.
11094 	 */
11095 	if (!i915_request_started(rq))
11096 		intel_rps_boost(rq);
11097 	i915_request_put(rq);
11098 
11099 	drm_crtc_vblank_put(wait->crtc);
11100 
11101 	list_del(&wait->wait.entry);
11102 	kfree(wait);
11103 	return 1;
11104 }
11105 
11106 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
11107 				       struct dma_fence *fence)
11108 {
11109 	struct wait_rps_boost *wait;
11110 
11111 	if (!dma_fence_is_i915(fence))
11112 		return;
11113 
11114 	if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
11115 		return;
11116 
11117 	if (drm_crtc_vblank_get(crtc))
11118 		return;
11119 
11120 	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
11121 	if (!wait) {
11122 		drm_crtc_vblank_put(crtc);
11123 		return;
11124 	}
11125 
11126 	wait->request = to_request(dma_fence_get(fence));
11127 	wait->crtc = crtc;
11128 
11129 	wait->wait.func = do_rps_boost;
11130 	wait->wait.flags = 0;
11131 
11132 	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
11133 }
11134 
11135 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
11136 {
11137 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11138 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11139 	struct drm_framebuffer *fb = plane_state->hw.fb;
11140 	struct i915_vma *vma;
11141 	bool phys_cursor =
11142 		plane->id == PLANE_CURSOR &&
11143 		INTEL_INFO(dev_priv)->display.cursor_needs_physical;
11144 
11145 	if (!intel_fb_uses_dpt(fb)) {
11146 		vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
11147 						 &plane_state->view.gtt,
11148 						 intel_plane_uses_fence(plane_state),
11149 						 &plane_state->flags);
11150 		if (IS_ERR(vma))
11151 			return PTR_ERR(vma);
11152 
11153 		plane_state->ggtt_vma = vma;
11154 	} else {
11155 		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11156 
11157 		vma = intel_dpt_pin(intel_fb->dpt_vm);
11158 		if (IS_ERR(vma))
11159 			return PTR_ERR(vma);
11160 
11161 		plane_state->ggtt_vma = vma;
11162 
11163 		vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
11164 					   &plane_state->flags, intel_fb->dpt_vm);
11165 		if (IS_ERR(vma)) {
11166 			intel_dpt_unpin(intel_fb->dpt_vm);
11167 			plane_state->ggtt_vma = NULL;
11168 			return PTR_ERR(vma);
11169 		}
11170 
11171 		plane_state->dpt_vma = vma;
11172 
11173 		WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
11174 	}
11175 
11176 	return 0;
11177 }
11178 
11179 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
11180 {
11181 	struct drm_framebuffer *fb = old_plane_state->hw.fb;
11182 	struct i915_vma *vma;
11183 
11184 	if (!intel_fb_uses_dpt(fb)) {
11185 		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
11186 		if (vma)
11187 			intel_unpin_fb_vma(vma, old_plane_state->flags);
11188 	} else {
11189 		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11190 
11191 		vma = fetch_and_zero(&old_plane_state->dpt_vma);
11192 		if (vma)
11193 			intel_unpin_fb_vma(vma, old_plane_state->flags);
11194 
11195 		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
11196 		if (vma)
11197 			intel_dpt_unpin(intel_fb->dpt_vm);
11198 	}
11199 }
11200 
11201 /**
11202  * intel_prepare_plane_fb - Prepare fb for usage on plane
11203  * @_plane: drm plane to prepare for
11204  * @_new_plane_state: the plane state being prepared
11205  *
11206  * Prepares a framebuffer for usage on a display plane.  Generally this
11207  * involves pinning the underlying object and updating the frontbuffer tracking
11208  * bits.  Some older platforms need special physical address handling for
11209  * cursor planes.
11210  *
11211  * Returns 0 on success, negative error code on failure.
11212  */
11213 int
11214 intel_prepare_plane_fb(struct drm_plane *_plane,
11215 		       struct drm_plane_state *_new_plane_state)
11216 {
11217 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
11218 	struct intel_plane *plane = to_intel_plane(_plane);
11219 	struct intel_plane_state *new_plane_state =
11220 		to_intel_plane_state(_new_plane_state);
11221 	struct intel_atomic_state *state =
11222 		to_intel_atomic_state(new_plane_state->uapi.state);
11223 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11224 	const struct intel_plane_state *old_plane_state =
11225 		intel_atomic_get_old_plane_state(state, plane);
11226 	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
11227 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
11228 	int ret;
11229 
11230 	if (old_obj) {
11231 		const struct intel_crtc_state *crtc_state =
11232 			intel_atomic_get_new_crtc_state(state,
11233 							to_intel_crtc(old_plane_state->hw.crtc));
11234 
11235 		/* Big Hammer, we also need to ensure that any pending
11236 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
11237 		 * current scanout is retired before unpinning the old
11238 		 * framebuffer. Note that we rely on userspace rendering
11239 		 * into the buffer attached to the pipe they are waiting
11240 		 * on. If not, userspace generates a GPU hang with IPEHR
11241 		 * point to the MI_WAIT_FOR_EVENT.
11242 		 *
11243 		 * This should only fail upon a hung GPU, in which case we
11244 		 * can safely continue.
11245 		 */
11246 		if (intel_crtc_needs_modeset(crtc_state)) {
11247 			ret = i915_sw_fence_await_reservation(&state->commit_ready,
11248 							      old_obj->base.resv, NULL,
11249 							      false, 0,
11250 							      GFP_KERNEL);
11251 			if (ret < 0)
11252 				return ret;
11253 		}
11254 	}
11255 
11256 	if (new_plane_state->uapi.fence) { /* explicit fencing */
11257 		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
11258 					     &attr);
11259 		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
11260 						    new_plane_state->uapi.fence,
11261 						    i915_fence_timeout(dev_priv),
11262 						    GFP_KERNEL);
11263 		if (ret < 0)
11264 			return ret;
11265 	}
11266 
11267 	if (!obj)
11268 		return 0;
11269 
11270 
11271 	ret = intel_plane_pin_fb(new_plane_state);
11272 	if (ret)
11273 		return ret;
11274 
11275 	i915_gem_object_wait_priority(obj, 0, &attr);
11276 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
11277 
11278 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
11279 		struct dma_fence *fence;
11280 
11281 		ret = i915_sw_fence_await_reservation(&state->commit_ready,
11282 						      obj->base.resv, NULL,
11283 						      false,
11284 						      i915_fence_timeout(dev_priv),
11285 						      GFP_KERNEL);
11286 		if (ret < 0)
11287 			goto unpin_fb;
11288 
11289 		fence = dma_resv_get_excl_unlocked(obj->base.resv);
11290 		if (fence) {
11291 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11292 						   fence);
11293 			dma_fence_put(fence);
11294 		}
11295 	} else {
11296 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11297 					   new_plane_state->uapi.fence);
11298 	}
11299 
11300 	/*
11301 	 * We declare pageflips to be interactive and so merit a small bias
11302 	 * towards upclocking to deliver the frame on time. By only changing
11303 	 * the RPS thresholds to sample more regularly and aim for higher
11304 	 * clocks we can hopefully deliver low power workloads (like kodi)
11305 	 * that are not quite steady state without resorting to forcing
11306 	 * maximum clocks following a vblank miss (see do_rps_boost()).
11307 	 */
11308 	if (!state->rps_interactive) {
11309 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
11310 		state->rps_interactive = true;
11311 	}
11312 
11313 	return 0;
11314 
11315 unpin_fb:
11316 	intel_plane_unpin_fb(new_plane_state);
11317 
11318 	return ret;
11319 }
11320 
11321 /**
11322  * intel_cleanup_plane_fb - Cleans up an fb after plane use
11323  * @plane: drm plane to clean up for
11324  * @_old_plane_state: the state from the previous modeset
11325  *
11326  * Cleans up a framebuffer that has just been removed from a plane.
11327  */
11328 void
11329 intel_cleanup_plane_fb(struct drm_plane *plane,
11330 		       struct drm_plane_state *_old_plane_state)
11331 {
11332 	struct intel_plane_state *old_plane_state =
11333 		to_intel_plane_state(_old_plane_state);
11334 	struct intel_atomic_state *state =
11335 		to_intel_atomic_state(old_plane_state->uapi.state);
11336 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
11337 	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
11338 
11339 	if (!obj)
11340 		return;
11341 
11342 	if (state->rps_interactive) {
11343 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
11344 		state->rps_interactive = false;
11345 	}
11346 
11347 	/* Should only be called after a successful intel_prepare_plane_fb()! */
11348 	intel_plane_unpin_fb(old_plane_state);
11349 }
11350 
11351 /**
11352  * intel_plane_destroy - destroy a plane
11353  * @plane: plane to destroy
11354  *
11355  * Common destruction function for all types of planes (primary, cursor,
11356  * sprite).
11357  */
11358 void intel_plane_destroy(struct drm_plane *plane)
11359 {
11360 	drm_plane_cleanup(plane);
11361 	kfree(to_intel_plane(plane));
11362 }
11363 
11364 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
11365 {
11366 	struct intel_plane *plane;
11367 
11368 	for_each_intel_plane(&dev_priv->drm, plane) {
11369 		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
11370 								  plane->pipe);
11371 
11372 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
11373 	}
11374 }
11375 
11376 
11377 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
11378 				      struct drm_file *file)
11379 {
11380 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11381 	struct drm_crtc *drmmode_crtc;
11382 	struct intel_crtc *crtc;
11383 
11384 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
11385 	if (!drmmode_crtc)
11386 		return -ENOENT;
11387 
11388 	crtc = to_intel_crtc(drmmode_crtc);
11389 	pipe_from_crtc_id->pipe = crtc->pipe;
11390 
11391 	return 0;
11392 }
11393 
11394 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
11395 {
11396 	struct drm_device *dev = encoder->base.dev;
11397 	struct intel_encoder *source_encoder;
11398 	u32 possible_clones = 0;
11399 
11400 	for_each_intel_encoder(dev, source_encoder) {
11401 		if (encoders_cloneable(encoder, source_encoder))
11402 			possible_clones |= drm_encoder_mask(&source_encoder->base);
11403 	}
11404 
11405 	return possible_clones;
11406 }
11407 
11408 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
11409 {
11410 	struct drm_device *dev = encoder->base.dev;
11411 	struct intel_crtc *crtc;
11412 	u32 possible_crtcs = 0;
11413 
11414 	for_each_intel_crtc(dev, crtc) {
11415 		if (encoder->pipe_mask & BIT(crtc->pipe))
11416 			possible_crtcs |= drm_crtc_mask(&crtc->base);
11417 	}
11418 
11419 	return possible_crtcs;
11420 }
11421 
11422 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
11423 {
11424 	if (!IS_MOBILE(dev_priv))
11425 		return false;
11426 
11427 	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
11428 		return false;
11429 
11430 	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
11431 		return false;
11432 
11433 	return true;
11434 }
11435 
11436 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
11437 {
11438 	if (DISPLAY_VER(dev_priv) >= 9)
11439 		return false;
11440 
11441 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
11442 		return false;
11443 
11444 	if (HAS_PCH_LPT_H(dev_priv) &&
11445 	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
11446 		return false;
11447 
11448 	/* DDI E can't be used if DDI A requires 4 lanes */
11449 	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
11450 		return false;
11451 
11452 	if (!dev_priv->vbt.int_crt_support)
11453 		return false;
11454 
11455 	return true;
11456 }
11457 
11458 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
11459 {
11460 	struct intel_encoder *encoder;
11461 	bool dpd_is_edp = false;
11462 
11463 	intel_pps_unlock_regs_wa(dev_priv);
11464 
11465 	if (!HAS_DISPLAY(dev_priv))
11466 		return;
11467 
11468 	if (IS_DG2(dev_priv)) {
11469 		intel_ddi_init(dev_priv, PORT_A);
11470 		intel_ddi_init(dev_priv, PORT_B);
11471 		intel_ddi_init(dev_priv, PORT_C);
11472 		intel_ddi_init(dev_priv, PORT_D_XELPD);
11473 	} else if (IS_ALDERLAKE_P(dev_priv)) {
11474 		intel_ddi_init(dev_priv, PORT_A);
11475 		intel_ddi_init(dev_priv, PORT_B);
11476 		intel_ddi_init(dev_priv, PORT_TC1);
11477 		intel_ddi_init(dev_priv, PORT_TC2);
11478 		intel_ddi_init(dev_priv, PORT_TC3);
11479 		intel_ddi_init(dev_priv, PORT_TC4);
11480 	} else if (IS_ALDERLAKE_S(dev_priv)) {
11481 		intel_ddi_init(dev_priv, PORT_A);
11482 		intel_ddi_init(dev_priv, PORT_TC1);
11483 		intel_ddi_init(dev_priv, PORT_TC2);
11484 		intel_ddi_init(dev_priv, PORT_TC3);
11485 		intel_ddi_init(dev_priv, PORT_TC4);
11486 	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
11487 		intel_ddi_init(dev_priv, PORT_A);
11488 		intel_ddi_init(dev_priv, PORT_B);
11489 		intel_ddi_init(dev_priv, PORT_TC1);
11490 		intel_ddi_init(dev_priv, PORT_TC2);
11491 	} else if (DISPLAY_VER(dev_priv) >= 12) {
11492 		intel_ddi_init(dev_priv, PORT_A);
11493 		intel_ddi_init(dev_priv, PORT_B);
11494 		intel_ddi_init(dev_priv, PORT_TC1);
11495 		intel_ddi_init(dev_priv, PORT_TC2);
11496 		intel_ddi_init(dev_priv, PORT_TC3);
11497 		intel_ddi_init(dev_priv, PORT_TC4);
11498 		intel_ddi_init(dev_priv, PORT_TC5);
11499 		intel_ddi_init(dev_priv, PORT_TC6);
11500 		icl_dsi_init(dev_priv);
11501 	} else if (IS_JSL_EHL(dev_priv)) {
11502 		intel_ddi_init(dev_priv, PORT_A);
11503 		intel_ddi_init(dev_priv, PORT_B);
11504 		intel_ddi_init(dev_priv, PORT_C);
11505 		intel_ddi_init(dev_priv, PORT_D);
11506 		icl_dsi_init(dev_priv);
11507 	} else if (DISPLAY_VER(dev_priv) == 11) {
11508 		intel_ddi_init(dev_priv, PORT_A);
11509 		intel_ddi_init(dev_priv, PORT_B);
11510 		intel_ddi_init(dev_priv, PORT_C);
11511 		intel_ddi_init(dev_priv, PORT_D);
11512 		intel_ddi_init(dev_priv, PORT_E);
11513 		intel_ddi_init(dev_priv, PORT_F);
11514 		icl_dsi_init(dev_priv);
11515 	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
11516 		intel_ddi_init(dev_priv, PORT_A);
11517 		intel_ddi_init(dev_priv, PORT_B);
11518 		intel_ddi_init(dev_priv, PORT_C);
11519 		vlv_dsi_init(dev_priv);
11520 	} else if (DISPLAY_VER(dev_priv) >= 9) {
11521 		intel_ddi_init(dev_priv, PORT_A);
11522 		intel_ddi_init(dev_priv, PORT_B);
11523 		intel_ddi_init(dev_priv, PORT_C);
11524 		intel_ddi_init(dev_priv, PORT_D);
11525 		intel_ddi_init(dev_priv, PORT_E);
11526 	} else if (HAS_DDI(dev_priv)) {
11527 		u32 found;
11528 
11529 		if (intel_ddi_crt_present(dev_priv))
11530 			intel_crt_init(dev_priv);
11531 
11532 		/* Haswell uses DDI functions to detect digital outputs. */
11533 		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
11534 		if (found)
11535 			intel_ddi_init(dev_priv, PORT_A);
11536 
11537 		found = intel_de_read(dev_priv, SFUSE_STRAP);
11538 		if (found & SFUSE_STRAP_DDIB_DETECTED)
11539 			intel_ddi_init(dev_priv, PORT_B);
11540 		if (found & SFUSE_STRAP_DDIC_DETECTED)
11541 			intel_ddi_init(dev_priv, PORT_C);
11542 		if (found & SFUSE_STRAP_DDID_DETECTED)
11543 			intel_ddi_init(dev_priv, PORT_D);
11544 		if (found & SFUSE_STRAP_DDIF_DETECTED)
11545 			intel_ddi_init(dev_priv, PORT_F);
11546 	} else if (HAS_PCH_SPLIT(dev_priv)) {
11547 		int found;
11548 
11549 		/*
11550 		 * intel_edp_init_connector() depends on this completing first,
11551 		 * to prevent the registration of both eDP and LVDS and the
11552 		 * incorrect sharing of the PPS.
11553 		 */
11554 		intel_lvds_init(dev_priv);
11555 		intel_crt_init(dev_priv);
11556 
11557 		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
11558 
11559 		if (ilk_has_edp_a(dev_priv))
11560 			g4x_dp_init(dev_priv, DP_A, PORT_A);
11561 
11562 		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
11563 			/* PCH SDVOB multiplex with HDMIB */
11564 			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
11565 			if (!found)
11566 				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
11567 			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
11568 				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
11569 		}
11570 
11571 		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
11572 			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
11573 
11574 		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
11575 			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
11576 
11577 		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
11578 			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
11579 
11580 		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
11581 			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
11582 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
11583 		bool has_edp, has_port;
11584 
11585 		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
11586 			intel_crt_init(dev_priv);
11587 
11588 		/*
11589 		 * The DP_DETECTED bit is the latched state of the DDC
11590 		 * SDA pin at boot. However since eDP doesn't require DDC
11591 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
11592 		 * eDP ports may have been muxed to an alternate function.
11593 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
11594 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
11595 		 * detect eDP ports.
11596 		 *
11597 		 * Sadly the straps seem to be missing sometimes even for HDMI
11598 		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
11599 		 * and VBT for the presence of the port. Additionally we can't
11600 		 * trust the port type the VBT declares as we've seen at least
11601 		 * HDMI ports that the VBT claim are DP or eDP.
11602 		 */
11603 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
11604 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
11605 		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
11606 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
11607 		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
11608 			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
11609 
11610 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
11611 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
11612 		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
11613 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
11614 		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
11615 			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
11616 
11617 		if (IS_CHERRYVIEW(dev_priv)) {
11618 			/*
11619 			 * eDP not supported on port D,
11620 			 * so no need to worry about it
11621 			 */
11622 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
11623 			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
11624 				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
11625 			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
11626 				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
11627 		}
11628 
11629 		vlv_dsi_init(dev_priv);
11630 	} else if (IS_PINEVIEW(dev_priv)) {
11631 		intel_lvds_init(dev_priv);
11632 		intel_crt_init(dev_priv);
11633 	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
11634 		bool found = false;
11635 
11636 		if (IS_MOBILE(dev_priv))
11637 			intel_lvds_init(dev_priv);
11638 
11639 		intel_crt_init(dev_priv);
11640 
11641 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11642 			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
11643 			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
11644 			if (!found && IS_G4X(dev_priv)) {
11645 				drm_dbg_kms(&dev_priv->drm,
11646 					    "probing HDMI on SDVOB\n");
11647 				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
11648 			}
11649 
11650 			if (!found && IS_G4X(dev_priv))
11651 				g4x_dp_init(dev_priv, DP_B, PORT_B);
11652 		}
11653 
11654 		/* Before G4X SDVOC doesn't have its own detect register */
11655 
11656 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11657 			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
11658 			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
11659 		}
11660 
11661 		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
11662 
11663 			if (IS_G4X(dev_priv)) {
11664 				drm_dbg_kms(&dev_priv->drm,
11665 					    "probing HDMI on SDVOC\n");
11666 				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
11667 			}
11668 			if (IS_G4X(dev_priv))
11669 				g4x_dp_init(dev_priv, DP_C, PORT_C);
11670 		}
11671 
11672 		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
11673 			g4x_dp_init(dev_priv, DP_D, PORT_D);
11674 
11675 		if (SUPPORTS_TV(dev_priv))
11676 			intel_tv_init(dev_priv);
11677 	} else if (DISPLAY_VER(dev_priv) == 2) {
11678 		if (IS_I85X(dev_priv))
11679 			intel_lvds_init(dev_priv);
11680 
11681 		intel_crt_init(dev_priv);
11682 		intel_dvo_init(dev_priv);
11683 	}
11684 
11685 	for_each_intel_encoder(&dev_priv->drm, encoder) {
11686 		encoder->base.possible_crtcs =
11687 			intel_encoder_possible_crtcs(encoder);
11688 		encoder->base.possible_clones =
11689 			intel_encoder_possible_clones(encoder);
11690 	}
11691 
11692 	intel_init_pch_refclk(dev_priv);
11693 
11694 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
11695 }
11696 
11697 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11698 {
11699 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11700 
11701 	drm_framebuffer_cleanup(fb);
11702 
11703 	if (intel_fb_uses_dpt(fb))
11704 		intel_dpt_destroy(intel_fb->dpt_vm);
11705 
11706 	intel_frontbuffer_put(intel_fb->frontbuffer);
11707 
11708 	kfree(intel_fb);
11709 }
11710 
11711 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
11712 						struct drm_file *file,
11713 						unsigned int *handle)
11714 {
11715 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11716 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
11717 
11718 	if (i915_gem_object_is_userptr(obj)) {
11719 		drm_dbg(&i915->drm,
11720 			"attempting to use a userptr for a framebuffer, denied\n");
11721 		return -EINVAL;
11722 	}
11723 
11724 	return drm_gem_handle_create(file, &obj->base, handle);
11725 }
11726 
11727 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
11728 					struct drm_file *file,
11729 					unsigned flags, unsigned color,
11730 					struct drm_clip_rect *clips,
11731 					unsigned num_clips)
11732 {
11733 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11734 
11735 	i915_gem_object_flush_if_display(obj);
11736 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
11737 
11738 	return 0;
11739 }
11740 
11741 static const struct drm_framebuffer_funcs intel_fb_funcs = {
11742 	.destroy = intel_user_framebuffer_destroy,
11743 	.create_handle = intel_user_framebuffer_create_handle,
11744 	.dirty = intel_user_framebuffer_dirty,
11745 };
11746 
11747 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
11748 				  struct drm_i915_gem_object *obj,
11749 				  struct drm_mode_fb_cmd2 *mode_cmd)
11750 {
11751 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
11752 	struct drm_framebuffer *fb = &intel_fb->base;
11753 	u32 max_stride;
11754 	unsigned int tiling, stride;
11755 	int ret = -EINVAL;
11756 	int i;
11757 
11758 	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
11759 	if (!intel_fb->frontbuffer)
11760 		return -ENOMEM;
11761 
11762 	i915_gem_object_lock(obj, NULL);
11763 	tiling = i915_gem_object_get_tiling(obj);
11764 	stride = i915_gem_object_get_stride(obj);
11765 	i915_gem_object_unlock(obj);
11766 
11767 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
11768 		/*
11769 		 * If there's a fence, enforce that
11770 		 * the fb modifier and tiling mode match.
11771 		 */
11772 		if (tiling != I915_TILING_NONE &&
11773 		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11774 			drm_dbg_kms(&dev_priv->drm,
11775 				    "tiling_mode doesn't match fb modifier\n");
11776 			goto err;
11777 		}
11778 	} else {
11779 		if (tiling == I915_TILING_X) {
11780 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
11781 		} else if (tiling == I915_TILING_Y) {
11782 			drm_dbg_kms(&dev_priv->drm,
11783 				    "No Y tiling for legacy addfb\n");
11784 			goto err;
11785 		}
11786 	}
11787 
11788 	if (!drm_any_plane_has_format(&dev_priv->drm,
11789 				      mode_cmd->pixel_format,
11790 				      mode_cmd->modifier[0])) {
11791 		drm_dbg_kms(&dev_priv->drm,
11792 			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
11793 			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);
11794 		goto err;
11795 	}
11796 
11797 	/*
11798 	 * gen2/3 display engine uses the fence if present,
11799 	 * so the tiling mode must match the fb modifier exactly.
11800 	 */
11801 	if (DISPLAY_VER(dev_priv) < 4 &&
11802 	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11803 		drm_dbg_kms(&dev_priv->drm,
11804 			    "tiling_mode must match fb modifier exactly on gen2/3\n");
11805 		goto err;
11806 	}
11807 
11808 	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
11809 					 mode_cmd->modifier[0]);
11810 	if (mode_cmd->pitches[0] > max_stride) {
11811 		drm_dbg_kms(&dev_priv->drm,
11812 			    "%s pitch (%u) must be at most %d\n",
11813 			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
11814 			    "tiled" : "linear",
11815 			    mode_cmd->pitches[0], max_stride);
11816 		goto err;
11817 	}
11818 
11819 	/*
11820 	 * If there's a fence, enforce that
11821 	 * the fb pitch and fence stride match.
11822 	 */
11823 	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
11824 		drm_dbg_kms(&dev_priv->drm,
11825 			    "pitch (%d) must match tiling stride (%d)\n",
11826 			    mode_cmd->pitches[0], stride);
11827 		goto err;
11828 	}
11829 
11830 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
11831 	if (mode_cmd->offsets[0] != 0) {
11832 		drm_dbg_kms(&dev_priv->drm,
11833 			    "plane 0 offset (0x%08x) must be 0\n",
11834 			    mode_cmd->offsets[0]);
11835 		goto err;
11836 	}
11837 
11838 	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
11839 
11840 	for (i = 0; i < fb->format->num_planes; i++) {
11841 		u32 stride_alignment;
11842 
11843 		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
11844 			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
11845 				    i);
11846 			goto err;
11847 		}
11848 
11849 		stride_alignment = intel_fb_stride_alignment(fb, i);
11850 		if (fb->pitches[i] & (stride_alignment - 1)) {
11851 			drm_dbg_kms(&dev_priv->drm,
11852 				    "plane %d pitch (%d) must be at least %u byte aligned\n",
11853 				    i, fb->pitches[i], stride_alignment);
11854 			goto err;
11855 		}
11856 
11857 		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
11858 			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
11859 
11860 			if (fb->pitches[i] != ccs_aux_stride) {
11861 				drm_dbg_kms(&dev_priv->drm,
11862 					    "ccs aux plane %d pitch (%d) must be %d\n",
11863 					    i,
11864 					    fb->pitches[i], ccs_aux_stride);
11865 				goto err;
11866 			}
11867 		}
11868 
11869 		/* TODO: Add POT stride remapping support for CCS formats as well. */
11870 		if (IS_ALDERLAKE_P(dev_priv) &&
11871 		    mode_cmd->modifier[i] != DRM_FORMAT_MOD_LINEAR &&
11872 		    !intel_fb_needs_pot_stride_remap(intel_fb) &&
11873 		    !is_power_of_2(mode_cmd->pitches[i])) {
11874 			drm_dbg_kms(&dev_priv->drm,
11875 				    "plane %d pitch (%d) must be power of two for tiled buffers\n",
11876 				    i, mode_cmd->pitches[i]);
11877 			goto err;
11878 		}
11879 
11880 		fb->obj[i] = &obj->base;
11881 	}
11882 
11883 	ret = intel_fill_fb_info(dev_priv, intel_fb);
11884 	if (ret)
11885 		goto err;
11886 
11887 	if (intel_fb_uses_dpt(fb)) {
11888 		struct i915_address_space *vm;
11889 
11890 		vm = intel_dpt_create(intel_fb);
11891 		if (IS_ERR(vm)) {
11892 			ret = PTR_ERR(vm);
11893 			goto err;
11894 		}
11895 
11896 		intel_fb->dpt_vm = vm;
11897 	}
11898 
11899 	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
11900 	if (ret) {
11901 		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
11902 		goto err;
11903 	}
11904 
11905 	return 0;
11906 
11907 err:
11908 	intel_frontbuffer_put(intel_fb->frontbuffer);
11909 	return ret;
11910 }
11911 
11912 static struct drm_framebuffer *
11913 intel_user_framebuffer_create(struct drm_device *dev,
11914 			      struct drm_file *filp,
11915 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
11916 {
11917 	struct drm_framebuffer *fb;
11918 	struct drm_i915_gem_object *obj;
11919 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
11920 	struct drm_i915_private *i915;
11921 
11922 	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
11923 	if (!obj)
11924 		return ERR_PTR(-ENOENT);
11925 
11926 	/* object is backed with LMEM for discrete */
11927 	i915 = to_i915(obj->base.dev);
11928 	if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
11929 		/* object is "remote", not in local memory */
11930 		i915_gem_object_put(obj);
11931 		return ERR_PTR(-EREMOTE);
11932 	}
11933 
11934 	fb = intel_framebuffer_create(obj, &mode_cmd);
11935 	i915_gem_object_put(obj);
11936 
11937 	return fb;
11938 }
11939 
11940 static enum drm_mode_status
11941 intel_mode_valid(struct drm_device *dev,
11942 		 const struct drm_display_mode *mode)
11943 {
11944 	struct drm_i915_private *dev_priv = to_i915(dev);
11945 	int hdisplay_max, htotal_max;
11946 	int vdisplay_max, vtotal_max;
11947 
11948 	/*
11949 	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
11950 	 * of DBLSCAN modes to the output's mode list when they detect
11951 	 * the scaling mode property on the connector. And they don't
11952 	 * ask the kernel to validate those modes in any way until
11953 	 * modeset time at which point the client gets a protocol error.
11954 	 * So in order to not upset those clients we silently ignore the
11955 	 * DBLSCAN flag on such connectors. For other connectors we will
11956 	 * reject modes with the DBLSCAN flag in encoder->compute_config().
11957 	 * And we always reject DBLSCAN modes in connector->mode_valid()
11958 	 * as we never want such modes on the connector's mode list.
11959 	 */
11960 
11961 	if (mode->vscan > 1)
11962 		return MODE_NO_VSCAN;
11963 
11964 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
11965 		return MODE_H_ILLEGAL;
11966 
11967 	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
11968 			   DRM_MODE_FLAG_NCSYNC |
11969 			   DRM_MODE_FLAG_PCSYNC))
11970 		return MODE_HSYNC;
11971 
11972 	if (mode->flags & (DRM_MODE_FLAG_BCAST |
11973 			   DRM_MODE_FLAG_PIXMUX |
11974 			   DRM_MODE_FLAG_CLKDIV2))
11975 		return MODE_BAD;
11976 
11977 	/* Transcoder timing limits */
11978 	if (DISPLAY_VER(dev_priv) >= 11) {
11979 		hdisplay_max = 16384;
11980 		vdisplay_max = 8192;
11981 		htotal_max = 16384;
11982 		vtotal_max = 8192;
11983 	} else if (DISPLAY_VER(dev_priv) >= 9 ||
11984 		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
11985 		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
11986 		vdisplay_max = 4096;
11987 		htotal_max = 8192;
11988 		vtotal_max = 8192;
11989 	} else if (DISPLAY_VER(dev_priv) >= 3) {
11990 		hdisplay_max = 4096;
11991 		vdisplay_max = 4096;
11992 		htotal_max = 8192;
11993 		vtotal_max = 8192;
11994 	} else {
11995 		hdisplay_max = 2048;
11996 		vdisplay_max = 2048;
11997 		htotal_max = 4096;
11998 		vtotal_max = 4096;
11999 	}
12000 
12001 	if (mode->hdisplay > hdisplay_max ||
12002 	    mode->hsync_start > htotal_max ||
12003 	    mode->hsync_end > htotal_max ||
12004 	    mode->htotal > htotal_max)
12005 		return MODE_H_ILLEGAL;
12006 
12007 	if (mode->vdisplay > vdisplay_max ||
12008 	    mode->vsync_start > vtotal_max ||
12009 	    mode->vsync_end > vtotal_max ||
12010 	    mode->vtotal > vtotal_max)
12011 		return MODE_V_ILLEGAL;
12012 
12013 	if (DISPLAY_VER(dev_priv) >= 5) {
12014 		if (mode->hdisplay < 64 ||
12015 		    mode->htotal - mode->hdisplay < 32)
12016 			return MODE_H_ILLEGAL;
12017 
12018 		if (mode->vtotal - mode->vdisplay < 5)
12019 			return MODE_V_ILLEGAL;
12020 	} else {
12021 		if (mode->htotal - mode->hdisplay < 32)
12022 			return MODE_H_ILLEGAL;
12023 
12024 		if (mode->vtotal - mode->vdisplay < 3)
12025 			return MODE_V_ILLEGAL;
12026 	}
12027 
12028 	return MODE_OK;
12029 }
12030 
12031 enum drm_mode_status
12032 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
12033 				const struct drm_display_mode *mode,
12034 				bool bigjoiner)
12035 {
12036 	int plane_width_max, plane_height_max;
12037 
12038 	/*
12039 	 * intel_mode_valid() should be
12040 	 * sufficient on older platforms.
12041 	 */
12042 	if (DISPLAY_VER(dev_priv) < 9)
12043 		return MODE_OK;
12044 
12045 	/*
12046 	 * Most people will probably want a fullscreen
12047 	 * plane so let's not advertize modes that are
12048 	 * too big for that.
12049 	 */
12050 	if (DISPLAY_VER(dev_priv) >= 11) {
12051 		plane_width_max = 5120 << bigjoiner;
12052 		plane_height_max = 4320;
12053 	} else {
12054 		plane_width_max = 5120;
12055 		plane_height_max = 4096;
12056 	}
12057 
12058 	if (mode->hdisplay > plane_width_max)
12059 		return MODE_H_ILLEGAL;
12060 
12061 	if (mode->vdisplay > plane_height_max)
12062 		return MODE_V_ILLEGAL;
12063 
12064 	return MODE_OK;
12065 }
12066 
12067 static const struct drm_mode_config_funcs intel_mode_funcs = {
12068 	.fb_create = intel_user_framebuffer_create,
12069 	.get_format_info = intel_get_format_info,
12070 	.output_poll_changed = intel_fbdev_output_poll_changed,
12071 	.mode_valid = intel_mode_valid,
12072 	.atomic_check = intel_atomic_check,
12073 	.atomic_commit = intel_atomic_commit,
12074 	.atomic_state_alloc = intel_atomic_state_alloc,
12075 	.atomic_state_clear = intel_atomic_state_clear,
12076 	.atomic_state_free = intel_atomic_state_free,
12077 };
12078 
12079 /**
12080  * intel_init_display_hooks - initialize the display modesetting hooks
12081  * @dev_priv: device private
12082  */
12083 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
12084 {
12085 	if (!HAS_DISPLAY(dev_priv))
12086 		return;
12087 
12088 	intel_init_cdclk_hooks(dev_priv);
12089 	intel_init_audio_hooks(dev_priv);
12090 
12091 	intel_dpll_init_clock_hook(dev_priv);
12092 
12093 	if (DISPLAY_VER(dev_priv) >= 9) {
12094 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
12095 		dev_priv->display.crtc_enable = hsw_crtc_enable;
12096 		dev_priv->display.crtc_disable = hsw_crtc_disable;
12097 	} else if (HAS_DDI(dev_priv)) {
12098 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
12099 		dev_priv->display.crtc_enable = hsw_crtc_enable;
12100 		dev_priv->display.crtc_disable = hsw_crtc_disable;
12101 	} else if (HAS_PCH_SPLIT(dev_priv)) {
12102 		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
12103 		dev_priv->display.crtc_enable = ilk_crtc_enable;
12104 		dev_priv->display.crtc_disable = ilk_crtc_disable;
12105 	} else if (IS_CHERRYVIEW(dev_priv) ||
12106 		   IS_VALLEYVIEW(dev_priv)) {
12107 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12108 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
12109 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12110 	} else {
12111 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12112 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
12113 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12114 	}
12115 
12116 	intel_fdi_init_hook(dev_priv);
12117 
12118 	if (DISPLAY_VER(dev_priv) >= 9) {
12119 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
12120 		dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
12121 	} else {
12122 		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
12123 		dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
12124 	}
12125 
12126 }
12127 
12128 void intel_modeset_init_hw(struct drm_i915_private *i915)
12129 {
12130 	struct intel_cdclk_state *cdclk_state;
12131 
12132 	if (!HAS_DISPLAY(i915))
12133 		return;
12134 
12135 	cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
12136 
12137 	intel_update_cdclk(i915);
12138 	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
12139 	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
12140 }
12141 
12142 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
12143 {
12144 	struct drm_plane *plane;
12145 	struct intel_crtc *crtc;
12146 
12147 	for_each_intel_crtc(state->dev, crtc) {
12148 		struct intel_crtc_state *crtc_state;
12149 
12150 		crtc_state = intel_atomic_get_crtc_state(state, crtc);
12151 		if (IS_ERR(crtc_state))
12152 			return PTR_ERR(crtc_state);
12153 
12154 		if (crtc_state->hw.active) {
12155 			/*
12156 			 * Preserve the inherited flag to avoid
12157 			 * taking the full modeset path.
12158 			 */
12159 			crtc_state->inherited = true;
12160 		}
12161 	}
12162 
12163 	drm_for_each_plane(plane, state->dev) {
12164 		struct drm_plane_state *plane_state;
12165 
12166 		plane_state = drm_atomic_get_plane_state(state, plane);
12167 		if (IS_ERR(plane_state))
12168 			return PTR_ERR(plane_state);
12169 	}
12170 
12171 	return 0;
12172 }
12173 
12174 /*
12175  * Calculate what we think the watermarks should be for the state we've read
12176  * out of the hardware and then immediately program those watermarks so that
12177  * we ensure the hardware settings match our internal state.
12178  *
12179  * We can calculate what we think WM's should be by creating a duplicate of the
12180  * current state (which was constructed during hardware readout) and running it
12181  * through the atomic check code to calculate new watermark values in the
12182  * state object.
12183  */
12184 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
12185 {
12186 	struct drm_atomic_state *state;
12187 	struct intel_atomic_state *intel_state;
12188 	struct intel_crtc *crtc;
12189 	struct intel_crtc_state *crtc_state;
12190 	struct drm_modeset_acquire_ctx ctx;
12191 	int ret;
12192 	int i;
12193 
12194 	/* Only supported on platforms that use atomic watermark design */
12195 	if (!dev_priv->display.optimize_watermarks)
12196 		return;
12197 
12198 	state = drm_atomic_state_alloc(&dev_priv->drm);
12199 	if (drm_WARN_ON(&dev_priv->drm, !state))
12200 		return;
12201 
12202 	intel_state = to_intel_atomic_state(state);
12203 
12204 	drm_modeset_acquire_init(&ctx, 0);
12205 
12206 retry:
12207 	state->acquire_ctx = &ctx;
12208 
12209 	/*
12210 	 * Hardware readout is the only time we don't want to calculate
12211 	 * intermediate watermarks (since we don't trust the current
12212 	 * watermarks).
12213 	 */
12214 	if (!HAS_GMCH(dev_priv))
12215 		intel_state->skip_intermediate_wm = true;
12216 
12217 	ret = sanitize_watermarks_add_affected(state);
12218 	if (ret)
12219 		goto fail;
12220 
12221 	ret = intel_atomic_check(&dev_priv->drm, state);
12222 	if (ret)
12223 		goto fail;
12224 
12225 	/* Write calculated watermark values back */
12226 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
12227 		crtc_state->wm.need_postvbl_update = true;
12228 		dev_priv->display.optimize_watermarks(intel_state, crtc);
12229 
12230 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
12231 	}
12232 
12233 fail:
12234 	if (ret == -EDEADLK) {
12235 		drm_atomic_state_clear(state);
12236 		drm_modeset_backoff(&ctx);
12237 		goto retry;
12238 	}
12239 
12240 	/*
12241 	 * If we fail here, it means that the hardware appears to be
12242 	 * programmed in a way that shouldn't be possible, given our
12243 	 * understanding of watermark requirements.  This might mean a
12244 	 * mistake in the hardware readout code or a mistake in the
12245 	 * watermark calculations for a given platform.  Raise a WARN
12246 	 * so that this is noticeable.
12247 	 *
12248 	 * If this actually happens, we'll have to just leave the
12249 	 * BIOS-programmed watermarks untouched and hope for the best.
12250 	 */
12251 	drm_WARN(&dev_priv->drm, ret,
12252 		 "Could not determine valid watermarks for inherited state\n");
12253 
12254 	drm_atomic_state_put(state);
12255 
12256 	drm_modeset_drop_locks(&ctx);
12257 	drm_modeset_acquire_fini(&ctx);
12258 }
12259 
12260 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
12261 {
12262 	if (IS_IRONLAKE(dev_priv)) {
12263 		u32 fdi_pll_clk =
12264 			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
12265 
12266 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
12267 	} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
12268 		dev_priv->fdi_pll_freq = 270000;
12269 	} else {
12270 		return;
12271 	}
12272 
12273 	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
12274 }
12275 
12276 static int intel_initial_commit(struct drm_device *dev)
12277 {
12278 	struct drm_atomic_state *state = NULL;
12279 	struct drm_modeset_acquire_ctx ctx;
12280 	struct intel_crtc *crtc;
12281 	int ret = 0;
12282 
12283 	state = drm_atomic_state_alloc(dev);
12284 	if (!state)
12285 		return -ENOMEM;
12286 
12287 	drm_modeset_acquire_init(&ctx, 0);
12288 
12289 retry:
12290 	state->acquire_ctx = &ctx;
12291 
12292 	for_each_intel_crtc(dev, crtc) {
12293 		struct intel_crtc_state *crtc_state =
12294 			intel_atomic_get_crtc_state(state, crtc);
12295 
12296 		if (IS_ERR(crtc_state)) {
12297 			ret = PTR_ERR(crtc_state);
12298 			goto out;
12299 		}
12300 
12301 		if (crtc_state->hw.active) {
12302 			struct intel_encoder *encoder;
12303 
12304 			/*
12305 			 * We've not yet detected sink capabilities
12306 			 * (audio,infoframes,etc.) and thus we don't want to
12307 			 * force a full state recomputation yet. We want that to
12308 			 * happen only for the first real commit from userspace.
12309 			 * So preserve the inherited flag for the time being.
12310 			 */
12311 			crtc_state->inherited = true;
12312 
12313 			ret = drm_atomic_add_affected_planes(state, &crtc->base);
12314 			if (ret)
12315 				goto out;
12316 
12317 			/*
12318 			 * FIXME hack to force a LUT update to avoid the
12319 			 * plane update forcing the pipe gamma on without
12320 			 * having a proper LUT loaded. Remove once we
12321 			 * have readout for pipe gamma enable.
12322 			 */
12323 			crtc_state->uapi.color_mgmt_changed = true;
12324 
12325 			for_each_intel_encoder_mask(dev, encoder,
12326 						    crtc_state->uapi.encoder_mask) {
12327 				if (encoder->initial_fastset_check &&
12328 				    !encoder->initial_fastset_check(encoder, crtc_state)) {
12329 					ret = drm_atomic_add_affected_connectors(state,
12330 										 &crtc->base);
12331 					if (ret)
12332 						goto out;
12333 				}
12334 			}
12335 		}
12336 	}
12337 
12338 	ret = drm_atomic_commit(state);
12339 
12340 out:
12341 	if (ret == -EDEADLK) {
12342 		drm_atomic_state_clear(state);
12343 		drm_modeset_backoff(&ctx);
12344 		goto retry;
12345 	}
12346 
12347 	drm_atomic_state_put(state);
12348 
12349 	drm_modeset_drop_locks(&ctx);
12350 	drm_modeset_acquire_fini(&ctx);
12351 
12352 	return ret;
12353 }
12354 
12355 static void intel_mode_config_init(struct drm_i915_private *i915)
12356 {
12357 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
12358 
12359 	drm_mode_config_init(&i915->drm);
12360 	INIT_LIST_HEAD(&i915->global_obj_list);
12361 
12362 	mode_config->min_width = 0;
12363 	mode_config->min_height = 0;
12364 
12365 	mode_config->preferred_depth = 24;
12366 	mode_config->prefer_shadow = 1;
12367 
12368 	mode_config->funcs = &intel_mode_funcs;
12369 
12370 	mode_config->async_page_flip = has_async_flips(i915);
12371 
12372 	/*
12373 	 * Maximum framebuffer dimensions, chosen to match
12374 	 * the maximum render engine surface size on gen4+.
12375 	 */
12376 	if (DISPLAY_VER(i915) >= 7) {
12377 		mode_config->max_width = 16384;
12378 		mode_config->max_height = 16384;
12379 	} else if (DISPLAY_VER(i915) >= 4) {
12380 		mode_config->max_width = 8192;
12381 		mode_config->max_height = 8192;
12382 	} else if (DISPLAY_VER(i915) == 3) {
12383 		mode_config->max_width = 4096;
12384 		mode_config->max_height = 4096;
12385 	} else {
12386 		mode_config->max_width = 2048;
12387 		mode_config->max_height = 2048;
12388 	}
12389 
12390 	if (IS_I845G(i915) || IS_I865G(i915)) {
12391 		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
12392 		mode_config->cursor_height = 1023;
12393 	} else if (IS_I830(i915) || IS_I85X(i915) ||
12394 		   IS_I915G(i915) || IS_I915GM(i915)) {
12395 		mode_config->cursor_width = 64;
12396 		mode_config->cursor_height = 64;
12397 	} else {
12398 		mode_config->cursor_width = 256;
12399 		mode_config->cursor_height = 256;
12400 	}
12401 }
12402 
12403 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
12404 {
12405 	intel_atomic_global_obj_cleanup(i915);
12406 	drm_mode_config_cleanup(&i915->drm);
12407 }
12408 
12409 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
12410 {
12411 	if (plane_config->fb) {
12412 		struct drm_framebuffer *fb = &plane_config->fb->base;
12413 
12414 		/* We may only have the stub and not a full framebuffer */
12415 		if (drm_framebuffer_read_refcount(fb))
12416 			drm_framebuffer_put(fb);
12417 		else
12418 			kfree(fb);
12419 	}
12420 
12421 	if (plane_config->vma)
12422 		i915_vma_put(plane_config->vma);
12423 }
12424 
12425 /* part #1: call before irq install */
12426 int intel_modeset_init_noirq(struct drm_i915_private *i915)
12427 {
12428 	int ret;
12429 
12430 	if (i915_inject_probe_failure(i915))
12431 		return -ENODEV;
12432 
12433 	if (HAS_DISPLAY(i915)) {
12434 		ret = drm_vblank_init(&i915->drm,
12435 				      INTEL_NUM_PIPES(i915));
12436 		if (ret)
12437 			return ret;
12438 	}
12439 
12440 	intel_bios_init(i915);
12441 
12442 	ret = intel_vga_register(i915);
12443 	if (ret)
12444 		goto cleanup_bios;
12445 
12446 	/* FIXME: completely on the wrong abstraction layer */
12447 	intel_power_domains_init_hw(i915, false);
12448 
12449 	if (!HAS_DISPLAY(i915))
12450 		return 0;
12451 
12452 	intel_dmc_ucode_init(i915);
12453 
12454 	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
12455 	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
12456 					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
12457 
12458 	i915->framestart_delay = 1; /* 1-4 */
12459 
12460 	i915->window2_delay = 0; /* No DSB so no window2 delay */
12461 
12462 	intel_mode_config_init(i915);
12463 
12464 	ret = intel_cdclk_init(i915);
12465 	if (ret)
12466 		goto cleanup_vga_client_pw_domain_dmc;
12467 
12468 	ret = intel_dbuf_init(i915);
12469 	if (ret)
12470 		goto cleanup_vga_client_pw_domain_dmc;
12471 
12472 	ret = intel_bw_init(i915);
12473 	if (ret)
12474 		goto cleanup_vga_client_pw_domain_dmc;
12475 
12476 	init_llist_head(&i915->atomic_helper.free_list);
12477 	INIT_WORK(&i915->atomic_helper.free_work,
12478 		  intel_atomic_helper_free_state_worker);
12479 
12480 	intel_init_quirks(i915);
12481 
12482 	intel_fbc_init(i915);
12483 
12484 	return 0;
12485 
12486 cleanup_vga_client_pw_domain_dmc:
12487 	intel_dmc_ucode_fini(i915);
12488 	intel_power_domains_driver_remove(i915);
12489 	intel_vga_unregister(i915);
12490 cleanup_bios:
12491 	intel_bios_driver_remove(i915);
12492 
12493 	return ret;
12494 }
12495 
12496 /* part #2: call after irq install, but before gem init */
12497 int intel_modeset_init_nogem(struct drm_i915_private *i915)
12498 {
12499 	struct drm_device *dev = &i915->drm;
12500 	enum pipe pipe;
12501 	struct intel_crtc *crtc;
12502 	int ret;
12503 
12504 	if (!HAS_DISPLAY(i915))
12505 		return 0;
12506 
12507 	intel_init_pm(i915);
12508 
12509 	intel_panel_sanitize_ssc(i915);
12510 
12511 	intel_pps_setup(i915);
12512 
12513 	intel_gmbus_setup(i915);
12514 
12515 	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
12516 		    INTEL_NUM_PIPES(i915),
12517 		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
12518 
12519 	for_each_pipe(i915, pipe) {
12520 		ret = intel_crtc_init(i915, pipe);
12521 		if (ret) {
12522 			intel_mode_config_cleanup(i915);
12523 			return ret;
12524 		}
12525 	}
12526 
12527 	intel_plane_possible_crtcs_init(i915);
12528 	intel_shared_dpll_init(dev);
12529 	intel_update_fdi_pll_freq(i915);
12530 
12531 	intel_update_czclk(i915);
12532 	intel_modeset_init_hw(i915);
12533 	intel_dpll_update_ref_clks(i915);
12534 
12535 	intel_hdcp_component_init(i915);
12536 
12537 	if (i915->max_cdclk_freq == 0)
12538 		intel_update_max_cdclk(i915);
12539 
12540 	/*
12541 	 * If the platform has HTI, we need to find out whether it has reserved
12542 	 * any display resources before we create our display outputs.
12543 	 */
12544 	if (INTEL_INFO(i915)->display.has_hti)
12545 		i915->hti_state = intel_de_read(i915, HDPORT_STATE);
12546 
12547 	/* Just disable it once at startup */
12548 	intel_vga_disable(i915);
12549 	intel_setup_outputs(i915);
12550 
12551 	drm_modeset_lock_all(dev);
12552 	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
12553 	drm_modeset_unlock_all(dev);
12554 
12555 	for_each_intel_crtc(dev, crtc) {
12556 		struct intel_initial_plane_config plane_config = {};
12557 
12558 		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
12559 			continue;
12560 
12561 		/*
12562 		 * Note that reserving the BIOS fb up front prevents us
12563 		 * from stuffing other stolen allocations like the ring
12564 		 * on top.  This prevents some ugliness at boot time, and
12565 		 * can even allow for smooth boot transitions if the BIOS
12566 		 * fb is large enough for the active pipe configuration.
12567 		 */
12568 		i915->display.get_initial_plane_config(crtc, &plane_config);
12569 
12570 		/*
12571 		 * If the fb is shared between multiple heads, we'll
12572 		 * just get the first one.
12573 		 */
12574 		intel_find_initial_plane_obj(crtc, &plane_config);
12575 
12576 		plane_config_fini(&plane_config);
12577 	}
12578 
12579 	/*
12580 	 * Make sure hardware watermarks really match the state we read out.
12581 	 * Note that we need to do this after reconstructing the BIOS fb's
12582 	 * since the watermark calculation done here will use pstate->fb.
12583 	 */
12584 	if (!HAS_GMCH(i915))
12585 		sanitize_watermarks(i915);
12586 
12587 	return 0;
12588 }
12589 
12590 /* part #3: call after gem init */
12591 int intel_modeset_init(struct drm_i915_private *i915)
12592 {
12593 	int ret;
12594 
12595 	if (!HAS_DISPLAY(i915))
12596 		return 0;
12597 
12598 	/*
12599 	 * Force all active planes to recompute their states. So that on
12600 	 * mode_setcrtc after probe, all the intel_plane_state variables
12601 	 * are already calculated and there is no assert_plane warnings
12602 	 * during bootup.
12603 	 */
12604 	ret = intel_initial_commit(&i915->drm);
12605 	if (ret)
12606 		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
12607 
12608 	intel_overlay_setup(i915);
12609 
12610 	ret = intel_fbdev_init(&i915->drm);
12611 	if (ret)
12612 		return ret;
12613 
12614 	/* Only enable hotplug handling once the fbdev is fully set up. */
12615 	intel_hpd_init(i915);
12616 	intel_hpd_poll_disable(i915);
12617 
12618 	intel_init_ipc(i915);
12619 
12620 	return 0;
12621 }
12622 
12623 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12624 {
12625 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12626 	/* 640x480@60Hz, ~25175 kHz */
12627 	struct dpll clock = {
12628 		.m1 = 18,
12629 		.m2 = 7,
12630 		.p1 = 13,
12631 		.p2 = 4,
12632 		.n = 2,
12633 	};
12634 	u32 dpll, fp;
12635 	int i;
12636 
12637 	drm_WARN_ON(&dev_priv->drm,
12638 		    i9xx_calc_dpll_params(48000, &clock) != 25154);
12639 
12640 	drm_dbg_kms(&dev_priv->drm,
12641 		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
12642 		    pipe_name(pipe), clock.vco, clock.dot);
12643 
12644 	fp = i9xx_dpll_compute_fp(&clock);
12645 	dpll = DPLL_DVO_2X_MODE |
12646 		DPLL_VGA_MODE_DIS |
12647 		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
12648 		PLL_P2_DIVIDE_BY_4 |
12649 		PLL_REF_INPUT_DREFCLK |
12650 		DPLL_VCO_ENABLE;
12651 
12652 	intel_de_write(dev_priv, FP0(pipe), fp);
12653 	intel_de_write(dev_priv, FP1(pipe), fp);
12654 
12655 	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
12656 	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
12657 	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
12658 	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
12659 	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
12660 	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
12661 	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
12662 
12663 	/*
12664 	 * Apparently we need to have VGA mode enabled prior to changing
12665 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
12666 	 * dividers, even though the register value does change.
12667 	 */
12668 	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
12669 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12670 
12671 	/* Wait for the clocks to stabilize. */
12672 	intel_de_posting_read(dev_priv, DPLL(pipe));
12673 	udelay(150);
12674 
12675 	/* The pixel multiplier can only be updated once the
12676 	 * DPLL is enabled and the clocks are stable.
12677 	 *
12678 	 * So write it again.
12679 	 */
12680 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12681 
12682 	/* We do this three times for luck */
12683 	for (i = 0; i < 3 ; i++) {
12684 		intel_de_write(dev_priv, DPLL(pipe), dpll);
12685 		intel_de_posting_read(dev_priv, DPLL(pipe));
12686 		udelay(150); /* wait for warmup */
12687 	}
12688 
12689 	intel_de_write(dev_priv, PIPECONF(pipe),
12690 		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
12691 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12692 
12693 	intel_wait_for_pipe_scanline_moving(crtc);
12694 }
12695 
12696 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12697 {
12698 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12699 
12700 	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
12701 		    pipe_name(pipe));
12702 
12703 	drm_WARN_ON(&dev_priv->drm,
12704 		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
12705 		    DISPLAY_PLANE_ENABLE);
12706 	drm_WARN_ON(&dev_priv->drm,
12707 		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
12708 		    DISPLAY_PLANE_ENABLE);
12709 	drm_WARN_ON(&dev_priv->drm,
12710 		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
12711 		    DISPLAY_PLANE_ENABLE);
12712 	drm_WARN_ON(&dev_priv->drm,
12713 		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
12714 	drm_WARN_ON(&dev_priv->drm,
12715 		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
12716 
12717 	intel_de_write(dev_priv, PIPECONF(pipe), 0);
12718 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12719 
12720 	intel_wait_for_pipe_scanline_stopped(crtc);
12721 
12722 	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
12723 	intel_de_posting_read(dev_priv, DPLL(pipe));
12724 }
12725 
12726 static void
12727 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
12728 {
12729 	struct intel_crtc *crtc;
12730 
12731 	if (DISPLAY_VER(dev_priv) >= 4)
12732 		return;
12733 
12734 	for_each_intel_crtc(&dev_priv->drm, crtc) {
12735 		struct intel_plane *plane =
12736 			to_intel_plane(crtc->base.primary);
12737 		struct intel_crtc *plane_crtc;
12738 		enum pipe pipe;
12739 
12740 		if (!plane->get_hw_state(plane, &pipe))
12741 			continue;
12742 
12743 		if (pipe == crtc->pipe)
12744 			continue;
12745 
12746 		drm_dbg_kms(&dev_priv->drm,
12747 			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
12748 			    plane->base.base.id, plane->base.name);
12749 
12750 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12751 		intel_plane_disable_noatomic(plane_crtc, plane);
12752 	}
12753 }
12754 
12755 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
12756 {
12757 	struct drm_device *dev = crtc->base.dev;
12758 	struct intel_encoder *encoder;
12759 
12760 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
12761 		return true;
12762 
12763 	return false;
12764 }
12765 
12766 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
12767 {
12768 	struct drm_device *dev = encoder->base.dev;
12769 	struct intel_connector *connector;
12770 
12771 	for_each_connector_on_encoder(dev, &encoder->base, connector)
12772 		return connector;
12773 
12774 	return NULL;
12775 }
12776 
12777 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
12778 			      enum pipe pch_transcoder)
12779 {
12780 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12781 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12782 }
12783 
12784 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
12785 {
12786 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12787 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12788 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
12789 
12790 	if (DISPLAY_VER(dev_priv) >= 9 ||
12791 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12792 		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
12793 		u32 val;
12794 
12795 		if (transcoder_is_dsi(cpu_transcoder))
12796 			return;
12797 
12798 		val = intel_de_read(dev_priv, reg);
12799 		val &= ~HSW_FRAME_START_DELAY_MASK;
12800 		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12801 		intel_de_write(dev_priv, reg, val);
12802 	} else {
12803 		i915_reg_t reg = PIPECONF(cpu_transcoder);
12804 		u32 val;
12805 
12806 		val = intel_de_read(dev_priv, reg);
12807 		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
12808 		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12809 		intel_de_write(dev_priv, reg, val);
12810 	}
12811 
12812 	if (!crtc_state->has_pch_encoder)
12813 		return;
12814 
12815 	if (HAS_PCH_IBX(dev_priv)) {
12816 		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
12817 		u32 val;
12818 
12819 		val = intel_de_read(dev_priv, reg);
12820 		val &= ~TRANS_FRAME_START_DELAY_MASK;
12821 		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12822 		intel_de_write(dev_priv, reg, val);
12823 	} else {
12824 		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
12825 		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
12826 		u32 val;
12827 
12828 		val = intel_de_read(dev_priv, reg);
12829 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
12830 		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12831 		intel_de_write(dev_priv, reg, val);
12832 	}
12833 }
12834 
12835 static void intel_sanitize_crtc(struct intel_crtc *crtc,
12836 				struct drm_modeset_acquire_ctx *ctx)
12837 {
12838 	struct drm_device *dev = crtc->base.dev;
12839 	struct drm_i915_private *dev_priv = to_i915(dev);
12840 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
12841 
12842 	if (crtc_state->hw.active) {
12843 		struct intel_plane *plane;
12844 
12845 		/* Clear any frame start delays used for debugging left by the BIOS */
12846 		intel_sanitize_frame_start_delay(crtc_state);
12847 
12848 		/* Disable everything but the primary plane */
12849 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
12850 			const struct intel_plane_state *plane_state =
12851 				to_intel_plane_state(plane->base.state);
12852 
12853 			if (plane_state->uapi.visible &&
12854 			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
12855 				intel_plane_disable_noatomic(crtc, plane);
12856 		}
12857 
12858 		/*
12859 		 * Disable any background color set by the BIOS, but enable the
12860 		 * gamma and CSC to match how we program our planes.
12861 		 */
12862 		if (DISPLAY_VER(dev_priv) >= 9)
12863 			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
12864 				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
12865 	}
12866 
12867 	/* Adjust the state of the output pipe according to whether we
12868 	 * have active connectors/encoders. */
12869 	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
12870 	    !crtc_state->bigjoiner_slave)
12871 		intel_crtc_disable_noatomic(crtc, ctx);
12872 
12873 	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
12874 		/*
12875 		 * We start out with underrun reporting disabled to avoid races.
12876 		 * For correct bookkeeping mark this on active crtcs.
12877 		 *
12878 		 * Also on gmch platforms we dont have any hardware bits to
12879 		 * disable the underrun reporting. Which means we need to start
12880 		 * out with underrun reporting disabled also on inactive pipes,
12881 		 * since otherwise we'll complain about the garbage we read when
12882 		 * e.g. coming up after runtime pm.
12883 		 *
12884 		 * No protection against concurrent access is required - at
12885 		 * worst a fifo underrun happens which also sets this to false.
12886 		 */
12887 		crtc->cpu_fifo_underrun_disabled = true;
12888 		/*
12889 		 * We track the PCH trancoder underrun reporting state
12890 		 * within the crtc. With crtc for pipe A housing the underrun
12891 		 * reporting state for PCH transcoder A, crtc for pipe B housing
12892 		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
12893 		 * and marking underrun reporting as disabled for the non-existing
12894 		 * PCH transcoders B and C would prevent enabling the south
12895 		 * error interrupt (see cpt_can_enable_serr_int()).
12896 		 */
12897 		if (has_pch_trancoder(dev_priv, crtc->pipe))
12898 			crtc->pch_fifo_underrun_disabled = true;
12899 	}
12900 }
12901 
12902 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
12903 {
12904 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12905 
12906 	/*
12907 	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
12908 	 * the hardware when a high res displays plugged in. DPLL P
12909 	 * divider is zero, and the pipe timings are bonkers. We'll
12910 	 * try to disable everything in that case.
12911 	 *
12912 	 * FIXME would be nice to be able to sanitize this state
12913 	 * without several WARNs, but for now let's take the easy
12914 	 * road.
12915 	 */
12916 	return IS_SANDYBRIDGE(dev_priv) &&
12917 		crtc_state->hw.active &&
12918 		crtc_state->shared_dpll &&
12919 		crtc_state->port_clock == 0;
12920 }
12921 
12922 static void intel_sanitize_encoder(struct intel_encoder *encoder)
12923 {
12924 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12925 	struct intel_connector *connector;
12926 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
12927 	struct intel_crtc_state *crtc_state = crtc ?
12928 		to_intel_crtc_state(crtc->base.state) : NULL;
12929 
12930 	/* We need to check both for a crtc link (meaning that the
12931 	 * encoder is active and trying to read from a pipe) and the
12932 	 * pipe itself being active. */
12933 	bool has_active_crtc = crtc_state &&
12934 		crtc_state->hw.active;
12935 
12936 	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
12937 		drm_dbg_kms(&dev_priv->drm,
12938 			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
12939 			    pipe_name(crtc->pipe));
12940 		has_active_crtc = false;
12941 	}
12942 
12943 	connector = intel_encoder_find_connector(encoder);
12944 	if (connector && !has_active_crtc) {
12945 		drm_dbg_kms(&dev_priv->drm,
12946 			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12947 			    encoder->base.base.id,
12948 			    encoder->base.name);
12949 
12950 		/* Connector is active, but has no active pipe. This is
12951 		 * fallout from our resume register restoring. Disable
12952 		 * the encoder manually again. */
12953 		if (crtc_state) {
12954 			struct drm_encoder *best_encoder;
12955 
12956 			drm_dbg_kms(&dev_priv->drm,
12957 				    "[ENCODER:%d:%s] manually disabled\n",
12958 				    encoder->base.base.id,
12959 				    encoder->base.name);
12960 
12961 			/* avoid oopsing in case the hooks consult best_encoder */
12962 			best_encoder = connector->base.state->best_encoder;
12963 			connector->base.state->best_encoder = &encoder->base;
12964 
12965 			/* FIXME NULL atomic state passed! */
12966 			if (encoder->disable)
12967 				encoder->disable(NULL, encoder, crtc_state,
12968 						 connector->base.state);
12969 			if (encoder->post_disable)
12970 				encoder->post_disable(NULL, encoder, crtc_state,
12971 						      connector->base.state);
12972 
12973 			connector->base.state->best_encoder = best_encoder;
12974 		}
12975 		encoder->base.crtc = NULL;
12976 
12977 		/* Inconsistent output/port/pipe state happens presumably due to
12978 		 * a bug in one of the get_hw_state functions. Or someplace else
12979 		 * in our code, like the register restore mess on resume. Clamp
12980 		 * things to off as a safer default. */
12981 
12982 		connector->base.dpms = DRM_MODE_DPMS_OFF;
12983 		connector->base.encoder = NULL;
12984 	}
12985 
12986 	/* notify opregion of the sanitized encoder state */
12987 	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
12988 
12989 	if (HAS_DDI(dev_priv))
12990 		intel_ddi_sanitize_encoder_pll_mapping(encoder);
12991 }
12992 
12993 /* FIXME read out full plane state for all planes */
12994 static void readout_plane_state(struct drm_i915_private *dev_priv)
12995 {
12996 	struct intel_plane *plane;
12997 	struct intel_crtc *crtc;
12998 
12999 	for_each_intel_plane(&dev_priv->drm, plane) {
13000 		struct intel_plane_state *plane_state =
13001 			to_intel_plane_state(plane->base.state);
13002 		struct intel_crtc_state *crtc_state;
13003 		enum pipe pipe = PIPE_A;
13004 		bool visible;
13005 
13006 		visible = plane->get_hw_state(plane, &pipe);
13007 
13008 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13009 		crtc_state = to_intel_crtc_state(crtc->base.state);
13010 
13011 		intel_set_plane_visible(crtc_state, plane_state, visible);
13012 
13013 		drm_dbg_kms(&dev_priv->drm,
13014 			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
13015 			    plane->base.base.id, plane->base.name,
13016 			    enableddisabled(visible), pipe_name(pipe));
13017 	}
13018 
13019 	for_each_intel_crtc(&dev_priv->drm, crtc) {
13020 		struct intel_crtc_state *crtc_state =
13021 			to_intel_crtc_state(crtc->base.state);
13022 
13023 		fixup_plane_bitmasks(crtc_state);
13024 	}
13025 }
13026 
13027 static void intel_modeset_readout_hw_state(struct drm_device *dev)
13028 {
13029 	struct drm_i915_private *dev_priv = to_i915(dev);
13030 	struct intel_cdclk_state *cdclk_state =
13031 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
13032 	struct intel_dbuf_state *dbuf_state =
13033 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
13034 	enum pipe pipe;
13035 	struct intel_crtc *crtc;
13036 	struct intel_encoder *encoder;
13037 	struct intel_connector *connector;
13038 	struct drm_connector_list_iter conn_iter;
13039 	u8 active_pipes = 0;
13040 
13041 	for_each_intel_crtc(dev, crtc) {
13042 		struct intel_crtc_state *crtc_state =
13043 			to_intel_crtc_state(crtc->base.state);
13044 
13045 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
13046 		intel_crtc_free_hw_state(crtc_state);
13047 		intel_crtc_state_reset(crtc_state, crtc);
13048 
13049 		intel_crtc_get_pipe_config(crtc_state);
13050 
13051 		crtc_state->hw.enable = crtc_state->hw.active;
13052 
13053 		crtc->base.enabled = crtc_state->hw.enable;
13054 		crtc->active = crtc_state->hw.active;
13055 
13056 		if (crtc_state->hw.active)
13057 			active_pipes |= BIT(crtc->pipe);
13058 
13059 		drm_dbg_kms(&dev_priv->drm,
13060 			    "[CRTC:%d:%s] hw state readout: %s\n",
13061 			    crtc->base.base.id, crtc->base.name,
13062 			    enableddisabled(crtc_state->hw.active));
13063 	}
13064 
13065 	dev_priv->active_pipes = cdclk_state->active_pipes =
13066 		dbuf_state->active_pipes = active_pipes;
13067 
13068 	readout_plane_state(dev_priv);
13069 
13070 	for_each_intel_encoder(dev, encoder) {
13071 		pipe = 0;
13072 
13073 		if (encoder->get_hw_state(encoder, &pipe)) {
13074 			struct intel_crtc_state *crtc_state;
13075 
13076 			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13077 			crtc_state = to_intel_crtc_state(crtc->base.state);
13078 
13079 			encoder->base.crtc = &crtc->base;
13080 			intel_encoder_get_config(encoder, crtc_state);
13081 			if (encoder->sync_state)
13082 				encoder->sync_state(encoder, crtc_state);
13083 
13084 			/* read out to slave crtc as well for bigjoiner */
13085 			if (crtc_state->bigjoiner) {
13086 				/* encoder should read be linked to bigjoiner master */
13087 				WARN_ON(crtc_state->bigjoiner_slave);
13088 
13089 				crtc = crtc_state->bigjoiner_linked_crtc;
13090 				crtc_state = to_intel_crtc_state(crtc->base.state);
13091 				intel_encoder_get_config(encoder, crtc_state);
13092 			}
13093 		} else {
13094 			encoder->base.crtc = NULL;
13095 		}
13096 
13097 		drm_dbg_kms(&dev_priv->drm,
13098 			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
13099 			    encoder->base.base.id, encoder->base.name,
13100 			    enableddisabled(encoder->base.crtc),
13101 			    pipe_name(pipe));
13102 	}
13103 
13104 	intel_dpll_readout_hw_state(dev_priv);
13105 
13106 	drm_connector_list_iter_begin(dev, &conn_iter);
13107 	for_each_intel_connector_iter(connector, &conn_iter) {
13108 		if (connector->get_hw_state(connector)) {
13109 			struct intel_crtc_state *crtc_state;
13110 			struct intel_crtc *crtc;
13111 
13112 			connector->base.dpms = DRM_MODE_DPMS_ON;
13113 
13114 			encoder = intel_attached_encoder(connector);
13115 			connector->base.encoder = &encoder->base;
13116 
13117 			crtc = to_intel_crtc(encoder->base.crtc);
13118 			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
13119 
13120 			if (crtc_state && crtc_state->hw.active) {
13121 				/*
13122 				 * This has to be done during hardware readout
13123 				 * because anything calling .crtc_disable may
13124 				 * rely on the connector_mask being accurate.
13125 				 */
13126 				crtc_state->uapi.connector_mask |=
13127 					drm_connector_mask(&connector->base);
13128 				crtc_state->uapi.encoder_mask |=
13129 					drm_encoder_mask(&encoder->base);
13130 			}
13131 		} else {
13132 			connector->base.dpms = DRM_MODE_DPMS_OFF;
13133 			connector->base.encoder = NULL;
13134 		}
13135 		drm_dbg_kms(&dev_priv->drm,
13136 			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
13137 			    connector->base.base.id, connector->base.name,
13138 			    enableddisabled(connector->base.encoder));
13139 	}
13140 	drm_connector_list_iter_end(&conn_iter);
13141 
13142 	for_each_intel_crtc(dev, crtc) {
13143 		struct intel_bw_state *bw_state =
13144 			to_intel_bw_state(dev_priv->bw_obj.state);
13145 		struct intel_crtc_state *crtc_state =
13146 			to_intel_crtc_state(crtc->base.state);
13147 		struct intel_plane *plane;
13148 		int min_cdclk = 0;
13149 
13150 		if (crtc_state->bigjoiner_slave)
13151 			continue;
13152 
13153 		if (crtc_state->hw.active) {
13154 			/*
13155 			 * The initial mode needs to be set in order to keep
13156 			 * the atomic core happy. It wants a valid mode if the
13157 			 * crtc's enabled, so we do the above call.
13158 			 *
13159 			 * But we don't set all the derived state fully, hence
13160 			 * set a flag to indicate that a full recalculation is
13161 			 * needed on the next commit.
13162 			 */
13163 			crtc_state->inherited = true;
13164 
13165 			intel_crtc_update_active_timings(crtc_state);
13166 
13167 			intel_crtc_copy_hw_to_uapi_state(crtc_state);
13168 		}
13169 
13170 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
13171 			const struct intel_plane_state *plane_state =
13172 				to_intel_plane_state(plane->base.state);
13173 
13174 			/*
13175 			 * FIXME don't have the fb yet, so can't
13176 			 * use intel_plane_data_rate() :(
13177 			 */
13178 			if (plane_state->uapi.visible)
13179 				crtc_state->data_rate[plane->id] =
13180 					4 * crtc_state->pixel_rate;
13181 			/*
13182 			 * FIXME don't have the fb yet, so can't
13183 			 * use plane->min_cdclk() :(
13184 			 */
13185 			if (plane_state->uapi.visible && plane->min_cdclk) {
13186 				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
13187 					crtc_state->min_cdclk[plane->id] =
13188 						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
13189 				else
13190 					crtc_state->min_cdclk[plane->id] =
13191 						crtc_state->pixel_rate;
13192 			}
13193 			drm_dbg_kms(&dev_priv->drm,
13194 				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
13195 				    plane->base.base.id, plane->base.name,
13196 				    crtc_state->min_cdclk[plane->id]);
13197 		}
13198 
13199 		if (crtc_state->hw.active) {
13200 			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
13201 			if (drm_WARN_ON(dev, min_cdclk < 0))
13202 				min_cdclk = 0;
13203 		}
13204 
13205 		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
13206 		cdclk_state->min_voltage_level[crtc->pipe] =
13207 			crtc_state->min_voltage_level;
13208 
13209 		intel_bw_crtc_update(bw_state, crtc_state);
13210 
13211 		intel_pipe_config_sanity_check(dev_priv, crtc_state);
13212 
13213 		/* discard our incomplete slave state, copy it from master */
13214 		if (crtc_state->bigjoiner && crtc_state->hw.active) {
13215 			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
13216 			struct intel_crtc_state *slave_crtc_state =
13217 				to_intel_crtc_state(slave->base.state);
13218 
13219 			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
13220 			slave->base.mode = crtc->base.mode;
13221 
13222 			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
13223 			cdclk_state->min_voltage_level[slave->pipe] =
13224 				crtc_state->min_voltage_level;
13225 
13226 			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
13227 				const struct intel_plane_state *plane_state =
13228 					to_intel_plane_state(plane->base.state);
13229 
13230 				/*
13231 				 * FIXME don't have the fb yet, so can't
13232 				 * use intel_plane_data_rate() :(
13233 				 */
13234 				if (plane_state->uapi.visible)
13235 					crtc_state->data_rate[plane->id] =
13236 						4 * crtc_state->pixel_rate;
13237 				else
13238 					crtc_state->data_rate[plane->id] = 0;
13239 			}
13240 
13241 			intel_bw_crtc_update(bw_state, slave_crtc_state);
13242 			drm_calc_timestamping_constants(&slave->base,
13243 							&slave_crtc_state->hw.adjusted_mode);
13244 		}
13245 	}
13246 }
13247 
13248 static void
13249 get_encoder_power_domains(struct drm_i915_private *dev_priv)
13250 {
13251 	struct intel_encoder *encoder;
13252 
13253 	for_each_intel_encoder(&dev_priv->drm, encoder) {
13254 		struct intel_crtc_state *crtc_state;
13255 
13256 		if (!encoder->get_power_domains)
13257 			continue;
13258 
13259 		/*
13260 		 * MST-primary and inactive encoders don't have a crtc state
13261 		 * and neither of these require any power domain references.
13262 		 */
13263 		if (!encoder->base.crtc)
13264 			continue;
13265 
13266 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
13267 		encoder->get_power_domains(encoder, crtc_state);
13268 	}
13269 }
13270 
13271 static void intel_early_display_was(struct drm_i915_private *dev_priv)
13272 {
13273 	/*
13274 	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
13275 	 * Also known as Wa_14010480278.
13276 	 */
13277 	if (IS_DISPLAY_VER(dev_priv, 10, 12))
13278 		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
13279 			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
13280 
13281 	if (IS_HASWELL(dev_priv)) {
13282 		/*
13283 		 * WaRsPkgCStateDisplayPMReq:hsw
13284 		 * System hang if this isn't done before disabling all planes!
13285 		 */
13286 		intel_de_write(dev_priv, CHICKEN_PAR1_1,
13287 			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
13288 	}
13289 
13290 	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
13291 		/* Display WA #1142:kbl,cfl,cml */
13292 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
13293 			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
13294 		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
13295 			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
13296 			     KBL_ARB_FILL_SPARE_14);
13297 	}
13298 }
13299 
13300 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
13301 				       enum port port, i915_reg_t hdmi_reg)
13302 {
13303 	u32 val = intel_de_read(dev_priv, hdmi_reg);
13304 
13305 	if (val & SDVO_ENABLE ||
13306 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
13307 		return;
13308 
13309 	drm_dbg_kms(&dev_priv->drm,
13310 		    "Sanitizing transcoder select for HDMI %c\n",
13311 		    port_name(port));
13312 
13313 	val &= ~SDVO_PIPE_SEL_MASK;
13314 	val |= SDVO_PIPE_SEL(PIPE_A);
13315 
13316 	intel_de_write(dev_priv, hdmi_reg, val);
13317 }
13318 
13319 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
13320 				     enum port port, i915_reg_t dp_reg)
13321 {
13322 	u32 val = intel_de_read(dev_priv, dp_reg);
13323 
13324 	if (val & DP_PORT_EN ||
13325 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
13326 		return;
13327 
13328 	drm_dbg_kms(&dev_priv->drm,
13329 		    "Sanitizing transcoder select for DP %c\n",
13330 		    port_name(port));
13331 
13332 	val &= ~DP_PIPE_SEL_MASK;
13333 	val |= DP_PIPE_SEL(PIPE_A);
13334 
13335 	intel_de_write(dev_priv, dp_reg, val);
13336 }
13337 
13338 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
13339 {
13340 	/*
13341 	 * The BIOS may select transcoder B on some of the PCH
13342 	 * ports even it doesn't enable the port. This would trip
13343 	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
13344 	 * Sanitize the transcoder select bits to prevent that. We
13345 	 * assume that the BIOS never actually enabled the port,
13346 	 * because if it did we'd actually have to toggle the port
13347 	 * on and back off to make the transcoder A select stick
13348 	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
13349 	 * intel_disable_sdvo()).
13350 	 */
13351 	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
13352 	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
13353 	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
13354 
13355 	/* PCH SDVOB multiplex with HDMIB */
13356 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
13357 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
13358 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
13359 }
13360 
13361 /* Scan out the current hw modeset state,
13362  * and sanitizes it to the current state
13363  */
13364 static void
13365 intel_modeset_setup_hw_state(struct drm_device *dev,
13366 			     struct drm_modeset_acquire_ctx *ctx)
13367 {
13368 	struct drm_i915_private *dev_priv = to_i915(dev);
13369 	struct intel_encoder *encoder;
13370 	struct intel_crtc *crtc;
13371 	intel_wakeref_t wakeref;
13372 
13373 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
13374 
13375 	intel_early_display_was(dev_priv);
13376 	intel_modeset_readout_hw_state(dev);
13377 
13378 	/* HW state is read out, now we need to sanitize this mess. */
13379 
13380 	/* Sanitize the TypeC port mode upfront, encoders depend on this */
13381 	for_each_intel_encoder(dev, encoder) {
13382 		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
13383 
13384 		/* We need to sanitize only the MST primary port. */
13385 		if (encoder->type != INTEL_OUTPUT_DP_MST &&
13386 		    intel_phy_is_tc(dev_priv, phy))
13387 			intel_tc_port_sanitize(enc_to_dig_port(encoder));
13388 	}
13389 
13390 	get_encoder_power_domains(dev_priv);
13391 
13392 	if (HAS_PCH_IBX(dev_priv))
13393 		ibx_sanitize_pch_ports(dev_priv);
13394 
13395 	/*
13396 	 * intel_sanitize_plane_mapping() may need to do vblank
13397 	 * waits, so we need vblank interrupts restored beforehand.
13398 	 */
13399 	for_each_intel_crtc(&dev_priv->drm, crtc) {
13400 		struct intel_crtc_state *crtc_state =
13401 			to_intel_crtc_state(crtc->base.state);
13402 
13403 		drm_crtc_vblank_reset(&crtc->base);
13404 
13405 		if (crtc_state->hw.active)
13406 			intel_crtc_vblank_on(crtc_state);
13407 	}
13408 
13409 	intel_sanitize_plane_mapping(dev_priv);
13410 
13411 	for_each_intel_encoder(dev, encoder)
13412 		intel_sanitize_encoder(encoder);
13413 
13414 	for_each_intel_crtc(&dev_priv->drm, crtc) {
13415 		struct intel_crtc_state *crtc_state =
13416 			to_intel_crtc_state(crtc->base.state);
13417 
13418 		intel_sanitize_crtc(crtc, ctx);
13419 		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
13420 	}
13421 
13422 	intel_modeset_update_connector_atomic_state(dev);
13423 
13424 	intel_dpll_sanitize_state(dev_priv);
13425 
13426 	if (IS_G4X(dev_priv)) {
13427 		g4x_wm_get_hw_state(dev_priv);
13428 		g4x_wm_sanitize(dev_priv);
13429 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
13430 		vlv_wm_get_hw_state(dev_priv);
13431 		vlv_wm_sanitize(dev_priv);
13432 	} else if (DISPLAY_VER(dev_priv) >= 9) {
13433 		skl_wm_get_hw_state(dev_priv);
13434 	} else if (HAS_PCH_SPLIT(dev_priv)) {
13435 		ilk_wm_get_hw_state(dev_priv);
13436 	}
13437 
13438 	for_each_intel_crtc(dev, crtc) {
13439 		struct intel_crtc_state *crtc_state =
13440 			to_intel_crtc_state(crtc->base.state);
13441 		u64 put_domains;
13442 
13443 		put_domains = modeset_get_crtc_power_domains(crtc_state);
13444 		if (drm_WARN_ON(dev, put_domains))
13445 			modeset_put_crtc_power_domains(crtc, put_domains);
13446 	}
13447 
13448 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
13449 }
13450 
13451 void intel_display_resume(struct drm_device *dev)
13452 {
13453 	struct drm_i915_private *dev_priv = to_i915(dev);
13454 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
13455 	struct drm_modeset_acquire_ctx ctx;
13456 	int ret;
13457 
13458 	if (!HAS_DISPLAY(dev_priv))
13459 		return;
13460 
13461 	dev_priv->modeset_restore_state = NULL;
13462 	if (state)
13463 		state->acquire_ctx = &ctx;
13464 
13465 	drm_modeset_acquire_init(&ctx, 0);
13466 
13467 	while (1) {
13468 		ret = drm_modeset_lock_all_ctx(dev, &ctx);
13469 		if (ret != -EDEADLK)
13470 			break;
13471 
13472 		drm_modeset_backoff(&ctx);
13473 	}
13474 
13475 	if (!ret)
13476 		ret = __intel_display_resume(dev, state, &ctx);
13477 
13478 	intel_enable_ipc(dev_priv);
13479 	drm_modeset_drop_locks(&ctx);
13480 	drm_modeset_acquire_fini(&ctx);
13481 
13482 	if (ret)
13483 		drm_err(&dev_priv->drm,
13484 			"Restoring old state failed with %i\n", ret);
13485 	if (state)
13486 		drm_atomic_state_put(state);
13487 }
13488 
13489 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
13490 {
13491 	struct intel_connector *connector;
13492 	struct drm_connector_list_iter conn_iter;
13493 
13494 	/* Kill all the work that may have been queued by hpd. */
13495 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
13496 	for_each_intel_connector_iter(connector, &conn_iter) {
13497 		if (connector->modeset_retry_work.func)
13498 			cancel_work_sync(&connector->modeset_retry_work);
13499 		if (connector->hdcp.shim) {
13500 			cancel_delayed_work_sync(&connector->hdcp.check_work);
13501 			cancel_work_sync(&connector->hdcp.prop_work);
13502 		}
13503 	}
13504 	drm_connector_list_iter_end(&conn_iter);
13505 }
13506 
13507 /* part #1: call before irq uninstall */
13508 void intel_modeset_driver_remove(struct drm_i915_private *i915)
13509 {
13510 	if (!HAS_DISPLAY(i915))
13511 		return;
13512 
13513 	flush_workqueue(i915->flip_wq);
13514 	flush_workqueue(i915->modeset_wq);
13515 
13516 	flush_work(&i915->atomic_helper.free_work);
13517 	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
13518 }
13519 
13520 /* part #2: call after irq uninstall */
13521 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
13522 {
13523 	if (!HAS_DISPLAY(i915))
13524 		return;
13525 
13526 	/*
13527 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
13528 	 * poll handlers. Hence disable polling after hpd handling is shut down.
13529 	 */
13530 	intel_hpd_poll_fini(i915);
13531 
13532 	/*
13533 	 * MST topology needs to be suspended so we don't have any calls to
13534 	 * fbdev after it's finalized. MST will be destroyed later as part of
13535 	 * drm_mode_config_cleanup()
13536 	 */
13537 	intel_dp_mst_suspend(i915);
13538 
13539 	/* poll work can call into fbdev, hence clean that up afterwards */
13540 	intel_fbdev_fini(i915);
13541 
13542 	intel_unregister_dsm_handler();
13543 
13544 	intel_fbc_global_disable(i915);
13545 
13546 	/* flush any delayed tasks or pending work */
13547 	flush_scheduled_work();
13548 
13549 	intel_hdcp_component_fini(i915);
13550 
13551 	intel_mode_config_cleanup(i915);
13552 
13553 	intel_overlay_cleanup(i915);
13554 
13555 	intel_gmbus_teardown(i915);
13556 
13557 	destroy_workqueue(i915->flip_wq);
13558 	destroy_workqueue(i915->modeset_wq);
13559 
13560 	intel_fbc_cleanup_cfb(i915);
13561 }
13562 
13563 /* part #3: call after gem init */
13564 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
13565 {
13566 	intel_dmc_ucode_fini(i915);
13567 
13568 	intel_power_domains_driver_remove(i915);
13569 
13570 	intel_vga_unregister(i915);
13571 
13572 	intel_bios_driver_remove(i915);
13573 }
13574 
13575 void intel_display_driver_register(struct drm_i915_private *i915)
13576 {
13577 	if (!HAS_DISPLAY(i915))
13578 		return;
13579 
13580 	intel_display_debugfs_register(i915);
13581 
13582 	/* Must be done after probing outputs */
13583 	intel_opregion_register(i915);
13584 	acpi_video_register();
13585 
13586 	intel_audio_init(i915);
13587 
13588 	/*
13589 	 * Some ports require correctly set-up hpd registers for
13590 	 * detection to work properly (leading to ghost connected
13591 	 * connector status), e.g. VGA on gm45.  Hence we can only set
13592 	 * up the initial fbdev config after hpd irqs are fully
13593 	 * enabled. We do it last so that the async config cannot run
13594 	 * before the connectors are registered.
13595 	 */
13596 	intel_fbdev_initial_config_async(&i915->drm);
13597 
13598 	/*
13599 	 * We need to coordinate the hotplugs with the asynchronous
13600 	 * fbdev configuration, for which we use the
13601 	 * fbdev->async_cookie.
13602 	 */
13603 	drm_kms_helper_poll_init(&i915->drm);
13604 }
13605 
13606 void intel_display_driver_unregister(struct drm_i915_private *i915)
13607 {
13608 	if (!HAS_DISPLAY(i915))
13609 		return;
13610 
13611 	intel_fbdev_unregister(i915);
13612 	intel_audio_deinit(i915);
13613 
13614 	/*
13615 	 * After flushing the fbdev (incl. a late async config which
13616 	 * will have delayed queuing of a hotplug event), then flush
13617 	 * the hotplug events.
13618 	 */
13619 	drm_kms_helper_poll_fini(&i915->drm);
13620 	drm_atomic_helper_shutdown(&i915->drm);
13621 
13622 	acpi_video_unregister();
13623 	intel_opregion_unregister(i915);
13624 }
13625