/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_audio.h"
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
#include "display/intel_vrr.h"

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "gt/intel_rps.h"
#include "gt/gen8_ppgtt.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "i915_drv.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_csr.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);

struct i915_dpt {
	struct i915_address_space vm;

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void __iomem *iomem;
};

#define i915_is_dpt(vm) ((vm)->is_dpt)

static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
	GEM_BUG_ON(!i915_is_dpt(vm));
	return container_of(vm, struct i915_dpt, vm);
}

#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
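/*
 * A quick sketch of the arithmetic (assuming 8 byte gen8 PTEs and 4 KiB
 * GTT pages): one DPT page holds 4096 / 8 = 512 PTEs and thus maps
 * 512 * 4 KiB = 2 MiB of framebuffer; dpt_total_entries() converts the
 * address space size back into that PTE count.
 */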

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void dpt_insert_page(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level level,
			    u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;

	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
		     vm->pte_encode(addr, level, flags));
}

static void dpt_insert_entries(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level level,
			       u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;
	const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	int i;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	i = vma->node.start / I915_GTT_PAGE_SIZE;
	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
		gen8_set_pte(&base[i++], pte_encode | addr);
}

static void dpt_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void dpt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
}

static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->clear_range(vm, vma->node.start, vma->size);
}

static void dpt_cleanup(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_gem_object_put(dpt->obj);
}

static struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
	struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
	struct drm_i915_private *i915 = to_i915(obj->dev);
	struct drm_i915_gem_object *dpt_obj;
	struct i915_address_space *vm;
	struct i915_dpt *dpt;
	size_t size;
	int ret;

	if (intel_fb_needs_pot_stride_remap(fb))
		size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
	else
		size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);

	size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
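	/*
	 * Worked example with hypothetical numbers: an 8 MiB linear fb
	 * spans 2048 GTT pages and so needs 2048 * 8 = 16 KiB of gen8
	 * PTEs, which round_up() pads to a whole number of 4 KiB pages.
	 */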

	if (HAS_LMEM(i915))
		dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
	else
		dpt_obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(dpt_obj))
		return ERR_CAST(dpt_obj);

	ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
	if (ret) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(ret);
	}

	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
	if (!dpt) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(-ENOMEM);
	}

	vm = &dpt->vm;

	vm->gt = &i915->gt;
	vm->i915 = i915;
	vm->dma = i915->drm.dev;
	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	vm->is_dpt = true;

	i915_address_space_init(vm, VM_CLASS_DPT);

	vm->insert_page = dpt_insert_page;
	vm->clear_range = dpt_clear_range;
	vm->insert_entries = dpt_insert_entries;
	vm->cleanup = dpt_cleanup;

	vm->vma_ops.bind_vma    = dpt_bind_vma;
	vm->vma_ops.unbind_vma  = dpt_unbind_vma;
	vm->vma_ops.set_pages   = ggtt_set_pages;
	vm->vma_ops.clear_pages = clear_pages;

	vm->pte_encode = gen8_ggtt_pte_encode;

	dpt->obj = dpt_obj;

	return &dpt->vm;
}

static void intel_dpt_destroy(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vm_close(&dpt->vm);
}

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
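	/*
	 * E.g. a fuse field of 2 selects the 2000 MHz VCO, so we'd
	 * return 2000 * 1000 = 2,000,000 kHz (2 GHz).
	 */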

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

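	/*
	 * Output clock = ref * 2 / (divider + 1); e.g. an 800 MHz (800000
	 * kHz) ref with a divider field of 3 yields 800000 * 2 / 4 =
	 * 400000 kHz.
	 */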
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (DISPLAY_VER(dev_priv) == 2)
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

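	/*
	 * Sample the scanline counter twice, 5 ms apart; if the pipe is
	 * actually running, the counter should have advanced between the
	 * reads (5 ms is a sizeable fraction of a frame at any plausible
	 * refresh rate).
	 */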
	line1 = intel_de_read(dev_priv, reg) & line_mask;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(dev_priv))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

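	/*
	 * Only read PIPECONF if the transcoder's power well is enabled;
	 * with the well down the pipe cannot be running, so report it
	 * as disabled without touching the unpowered register.
	 */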
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
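		/*
		 * Port C shares DPLL(0)'s ready register with port B;
		 * its status bits sit four bits higher, hence the shift.
		 */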
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}

static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

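/*
 * A semi-planar YUV fb normally has two color planes (Y + interleaved UV);
 * with a CCS modifier each of them gains an AUX plane, doubling the
 * expected plane count to four (see gen12_ccs_formats[] below).
 */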
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
				    u64 modifier)
{
	return info->is_yuv &&
	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}

unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (DISPLAY_VER(dev_priv) == 2)
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}

unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	unsigned int tile_height = intel_tile_height(fb, color_plane);

	return ALIGN(height, tile_height);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++)
		size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;

	return size;
}

static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9)
		return 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (DISPLAY_VER(dev_priv) >= 4)
		return 4 * 1024;
	else
		return 0;
}

static bool has_async_flips(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 5;
}

unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
				  int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

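	/* DPT-backed fbs use a 512 * 4 KiB = 2 MiB surface alignment */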
	if (intel_fb_uses_dpt(fb))
		return 512 * 4096;

	/* AUX_DIST needs only 4K alignment */
	if (is_ccs_plane(fb, color_plane))
		return 4096;

	if (is_semiplanar_uv_plane(fb, color_plane)) {
		/*
		 * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
		 * alignment for linear UV planes on all platforms.
		 */
		if (DISPLAY_VER(dev_priv) >= 12) {
			if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
				return intel_linear_alignment(dev_priv);

			return intel_tile_row_size(fb, color_plane);
		}

		return 4096;
	}

	drm_WARN_ON(&dev_priv->drm, color_plane != 0);

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (has_async_flips(dev_priv))
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}

static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->has_fbc &&
		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}

static struct i915_vma *
intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
		     const struct i915_ggtt_view *view,
		     bool uses_fence,
		     unsigned long *out_flags,
		     struct i915_address_space *vm)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_vma *vma;
	u32 alignment;
	int ret;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

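	/* 2 MiB, presumably to match intel_surf_alignment() for DPT fbs */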
	alignment = 4096 * 512;

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		goto err;

	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
		ret = i915_vma_unbind(vma);
		if (ret) {
			vma = ERR_PTR(ret);
			goto err;
		}
	}

	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	i915_gem_object_flush_if_display(obj);

	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	return vma;
}

struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   bool phys_cursor,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_gem_ww_ctx ww;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;
	int ret;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	if (phys_cursor)
		alignment = intel_cursor_alignment(dev_priv);
	else
		alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Let's presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (!ret && phys_cursor)
		ret = i915_gem_object_attach_phys(obj, alignment);
	if (!ret)
		ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	if (!ret) {
		vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
							   view, pinctl);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err_unpin;
		}
	}

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
			i915_vma_unpin(vma);
			goto err_unpin;
		}
		ret = 0;

		if (vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	i915_vma_get(vma);

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (ret)
		vma = ERR_PTR(ret);

	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}

void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
	i915_vma_put(vma);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].stride;

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)

{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case I915_FORMAT_MOD_X_TILED:
		return I915_TILING_X;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return I915_TILING_Y;
	default:
		return I915_TILING_NONE;
	}
}

/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};

/*
 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
 * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
 * the main surface.
 */
static const struct drm_format_info gen12_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};

/*
 * Same as gen12_ccs_formats[] above, but with additional surface used
 * to pass Clear Color information in plane 2 with 64 bits of data.
 */
static const struct drm_format_info gen12_ccs_cc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

static const struct drm_format_info *
intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	switch (cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return lookup_format_info(skl_ccs_formats,
					  ARRAY_SIZE(skl_ccs_formats),
					  cmd->pixel_format);
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return lookup_format_info(gen12_ccs_formats,
					  ARRAY_SIZE(gen12_ccs_formats),
					  cmd->pixel_format);
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return lookup_format_info(gen12_ccs_cc_formats,
					  ARRAY_SIZE(gen12_ccs_cc_formats),
					  cmd->pixel_format);
	default:
		return NULL;
	}
}

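/*
 * The AUX pitch is one 64 byte CCS cache line per 512 bytes of main
 * surface pitch (rounded up), i.e. roughly main pitch / 8: e.g. a
 * 4096 byte main pitch gives DIV_ROUND_UP(4096, 512) * 64 = 512 bytes.
 */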
static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
{
	return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
			    512) * 64;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * if pipe A is disabled, use the first pipe from pipe_mask.
1595 	 */
1596 	crtc = intel_get_first_crtc(dev_priv);
1597 	if (!crtc)
1598 		return 0;
1599 
1600 	plane = to_intel_plane(crtc->base.primary);
1601 
1602 	return plane->max_stride(plane, pixel_format, modifier,
1603 				 DRM_MODE_ROTATE_0);
1604 }
1605 
1606 static
1607 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1608 			u32 pixel_format, u64 modifier)
1609 {
1610 	/*
1611 	 * Arbitrary limit for gen4+ chosen to match the
1612 	 * render engine max stride.
1613 	 *
1614 	 * The new CCS hash mode makes remapping impossible
1615 	 */
1616 	if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
1617 	    intel_modifier_uses_dpt(dev_priv, modifier))
1618 		return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
1619 	else if (DISPLAY_VER(dev_priv) >= 7)
1620 		return 256 * 1024;
1621 	else
1622 		return 128 * 1024;
1623 }
1624 
1625 static u32
1626 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
1627 {
1628 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
1629 	u32 tile_width;
1630 
1631 	if (is_surface_linear(fb, color_plane)) {
1632 		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
1633 							   fb->format->format,
1634 							   fb->modifier);
1635 
1636 		/*
1637 		 * To make remapping with linear generally feasible
1638 		 * we need the stride to be page aligned.
1639 		 */
1640 		if (fb->pitches[color_plane] > max_stride &&
1641 		    !is_ccs_modifier(fb->modifier))
1642 			return intel_tile_size(dev_priv);
1643 		else
1644 			return 64;
1645 	}
1646 
1647 	tile_width = intel_tile_width_bytes(fb, color_plane);
1648 	if (is_ccs_modifier(fb->modifier)) {
1649 		/*
1650 		 * Display WA #0531: skl,bxt,kbl,glk
1651 		 *
1652 		 * Render decompression and plane width > 3840
1653 		 * combined with horizontal panning requires the
1654 		 * plane stride to be a multiple of 4. We'll just
1655 		 * require the entire fb to accommodate that to avoid
1656 		 * potential runtime errors at plane configuration time.
1657 		 */
1658 		if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
1659 		    color_plane == 0 && fb->width > 3840)
1660 			tile_width *= 4;
1661 		/*
1662 		 * The main surface pitch must be padded to a multiple of four
1663 		 * tile widths.
1664 		 */
1665 		else if (DISPLAY_VER(dev_priv) >= 12)
1666 			tile_width *= 4;
1667 	}
1668 	return tile_width;
1669 }
1670 
1671 static struct i915_vma *
1672 initial_plane_vma(struct drm_i915_private *i915,
1673 		  struct intel_initial_plane_config *plane_config)
1674 {
1675 	struct drm_i915_gem_object *obj;
1676 	struct i915_vma *vma;
1677 	u32 base, size;
1678 
1679 	if (plane_config->size == 0)
1680 		return NULL;
1681 
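	/*
	 * Round the BIOS fb range out to GGTT alignment. Worked example,
	 * assuming a 4 KiB I915_GTT_MIN_ALIGNMENT: base = 0x21400 and
	 * size = 0x3000 expand to the aligned range [0x21000, 0x24400),
	 * i.e. base = 0x21000 and size = 0x3400.
	 */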
1682 	base = round_down(plane_config->base,
1683 			  I915_GTT_MIN_ALIGNMENT);
1684 	size = round_up(plane_config->base + plane_config->size,
1685 			I915_GTT_MIN_ALIGNMENT);
1686 	size -= base;
1687 
1688 	/*
1689 	 * If the FB is too big, just don't use it since fbdev is not very
1690 	 * important and we should probably use that space with FBC or other
1691 	 * features.
1692 	 */
1693 	if (size * 2 > i915->stolen_usable_size)
1694 		return NULL;
1695 
1696 	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
1697 	if (IS_ERR(obj))
1698 		return NULL;
1699 
1700 	/*
1701 	 * Mark it WT ahead of time to avoid changing the
1702 	 * cache_level during fbdev initialization. The
	 * unbind there would get stuck waiting for RCU.
1704 	 */
1705 	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
1706 					    I915_CACHE_WT : I915_CACHE_NONE);
1707 
1708 	switch (plane_config->tiling) {
1709 	case I915_TILING_NONE:
1710 		break;
1711 	case I915_TILING_X:
1712 	case I915_TILING_Y:
1713 		obj->tiling_and_stride =
1714 			plane_config->fb->base.pitches[0] |
1715 			plane_config->tiling;
1716 		break;
1717 	default:
1718 		MISSING_CASE(plane_config->tiling);
1719 		goto err_obj;
1720 	}
1721 
1722 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1723 	if (IS_ERR(vma))
1724 		goto err_obj;
1725 
1726 	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
1727 		goto err_obj;
1728 
1729 	if (i915_gem_object_is_tiled(obj) &&
1730 	    !i915_vma_is_map_and_fenceable(vma))
1731 		goto err_obj;
1732 
1733 	return vma;
1734 
1735 err_obj:
1736 	i915_gem_object_put(obj);
1737 	return NULL;
1738 }
1739 
1740 static bool
1741 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
1742 			      struct intel_initial_plane_config *plane_config)
1743 {
1744 	struct drm_device *dev = crtc->base.dev;
1745 	struct drm_i915_private *dev_priv = to_i915(dev);
1746 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
1747 	struct drm_framebuffer *fb = &plane_config->fb->base;
1748 	struct i915_vma *vma;
1749 
1750 	switch (fb->modifier) {
1751 	case DRM_FORMAT_MOD_LINEAR:
1752 	case I915_FORMAT_MOD_X_TILED:
1753 	case I915_FORMAT_MOD_Y_TILED:
1754 		break;
1755 	default:
1756 		drm_dbg(&dev_priv->drm,
1757 			"Unsupported modifier for initial FB: 0x%llx\n",
1758 			fb->modifier);
1759 		return false;
1760 	}
1761 
1762 	vma = initial_plane_vma(dev_priv, plane_config);
1763 	if (!vma)
1764 		return false;
1765 
1766 	mode_cmd.pixel_format = fb->format->format;
1767 	mode_cmd.width = fb->width;
1768 	mode_cmd.height = fb->height;
1769 	mode_cmd.pitches[0] = fb->pitches[0];
1770 	mode_cmd.modifier[0] = fb->modifier;
1771 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
1772 
1773 	if (intel_framebuffer_init(to_intel_framebuffer(fb),
1774 				   vma->obj, &mode_cmd)) {
1775 		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
1776 		goto err_vma;
1777 	}
1778 
1779 	plane_config->vma = vma;
1780 	return true;
1781 
1782 err_vma:
1783 	i915_vma_put(vma);
1784 	return false;
1785 }
1786 
1787 static void
1788 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
1789 			struct intel_plane_state *plane_state,
1790 			bool visible)
1791 {
1792 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1793 
1794 	plane_state->uapi.visible = visible;
1795 
1796 	if (visible)
1797 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
1798 	else
1799 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
1800 }
1801 
1802 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
1803 {
1804 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1805 	struct drm_plane *plane;
1806 
1807 	/*
1808 	 * Active_planes aliases if multiple "primary" or cursor planes
1809 	 * have been used on the same (or wrong) pipe. plane_mask uses
1810 	 * unique ids, hence we can use that to reconstruct active_planes.
1811 	 */
1812 	crtc_state->enabled_planes = 0;
1813 	crtc_state->active_planes = 0;
1814 
1815 	drm_for_each_plane_mask(plane, &dev_priv->drm,
1816 				crtc_state->uapi.plane_mask) {
1817 		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
1818 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
1819 	}
1820 }
1821 
1822 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
1823 					 struct intel_plane *plane)
1824 {
1825 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1826 	struct intel_crtc_state *crtc_state =
1827 		to_intel_crtc_state(crtc->base.state);
1828 	struct intel_plane_state *plane_state =
1829 		to_intel_plane_state(plane->base.state);
1830 
1831 	drm_dbg_kms(&dev_priv->drm,
1832 		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
1833 		    plane->base.base.id, plane->base.name,
1834 		    crtc->base.base.id, crtc->base.name);
1835 
1836 	intel_set_plane_visible(crtc_state, plane_state, false);
1837 	fixup_plane_bitmasks(crtc_state);
1838 	crtc_state->data_rate[plane->id] = 0;
1839 	crtc_state->min_cdclk[plane->id] = 0;
1840 
1841 	if (plane->id == PLANE_PRIMARY)
1842 		hsw_disable_ips(crtc_state);
1843 
1844 	/*
1845 	 * Vblank time updates from the shadow to live plane control register
1846 	 * are blocked if the memory self-refresh mode is active at that
1847 	 * moment. So to make sure the plane gets truly disabled, disable
1848 	 * first the self-refresh mode. The self-refresh enable bit in turn
1849 	 * will be checked/applied by the HW only at the next frame start
1850 	 * event which is after the vblank start event, so we need to have a
1851 	 * wait-for-vblank between disabling the plane and the pipe.
1852 	 */
1853 	if (HAS_GMCH(dev_priv) &&
1854 	    intel_set_memory_cxsr(dev_priv, false))
1855 		intel_wait_for_vblank(dev_priv, crtc->pipe);
1856 
1857 	/*
1858 	 * Gen2 reports pipe underruns whenever all planes are disabled.
1859 	 * So disable underrun reporting before all the planes get disabled.
1860 	 */
1861 	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
1862 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
1863 
1864 	intel_disable_plane(plane, crtc_state);
1865 	intel_wait_for_vblank(dev_priv, crtc->pipe);
1866 }
1867 
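/*
 * Pin the DPT (display page table) object into the GGTT and map it for CPU
 * access, so that its PTEs can be written through dpt->iomem. A runtime PM
 * reference is held for the duration of the pin attempt.
 */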
1868 static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
1869 {
1870 	struct drm_i915_private *i915 = vm->i915;
1871 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
1872 	intel_wakeref_t wakeref;
1873 	struct i915_vma *vma;
1874 	void __iomem *iomem;
1875 
1876 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1877 	atomic_inc(&i915->gpu_error.pending_fb_pin);
1878 
1879 	vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096,
1880 				       HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
1881 	if (IS_ERR(vma))
1882 		goto err;
1883 
1884 	iomem = i915_vma_pin_iomap(vma);
1885 	i915_vma_unpin(vma);
1886 	if (IS_ERR(iomem)) {
		vma = ERR_CAST(iomem);
1888 		goto err;
1889 	}
1890 
1891 	dpt->vma = vma;
1892 	dpt->iomem = iomem;
1893 
1894 	i915_vma_get(vma);
1895 
1896 err:
1897 	atomic_dec(&i915->gpu_error.pending_fb_pin);
1898 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1899 
1900 	return vma;
1901 }
1902 
1903 static void intel_dpt_unpin(struct i915_address_space *vm)
1904 {
1905 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
1906 
1907 	i915_vma_unpin_iomap(dpt->vma);
1908 	i915_vma_put(dpt->vma);
1909 }
1910 
1911 static void
1912 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
1913 			     struct intel_initial_plane_config *plane_config)
1914 {
1915 	struct drm_device *dev = intel_crtc->base.dev;
1916 	struct drm_i915_private *dev_priv = to_i915(dev);
1917 	struct drm_crtc *c;
1918 	struct drm_plane *primary = intel_crtc->base.primary;
1919 	struct drm_plane_state *plane_state = primary->state;
1920 	struct intel_plane *intel_plane = to_intel_plane(primary);
1921 	struct intel_plane_state *intel_state =
1922 		to_intel_plane_state(plane_state);
1923 	struct intel_crtc_state *crtc_state =
1924 		to_intel_crtc_state(intel_crtc->base.state);
1925 	struct drm_framebuffer *fb;
1926 	struct i915_vma *vma;
1927 
1928 	/*
1929 	 * TODO:
1930 	 *   Disable planes if get_initial_plane_config() failed.
1931 	 *   Make sure things work if the surface base is not page aligned.
1932 	 */
1933 	if (!plane_config->fb)
1934 		return;
1935 
1936 	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
1937 		fb = &plane_config->fb->base;
1938 		vma = plane_config->vma;
1939 		goto valid_fb;
1940 	}
1941 
1942 	/*
1943 	 * Failed to alloc the obj, check to see if we should share
1944 	 * an fb with another CRTC instead
1945 	 */
1946 	for_each_crtc(dev, c) {
1947 		struct intel_plane_state *state;
1948 
1949 		if (c == &intel_crtc->base)
1950 			continue;
1951 
1952 		if (!to_intel_crtc_state(c->state)->uapi.active)
1953 			continue;
1954 
1955 		state = to_intel_plane_state(c->primary->state);
1956 		if (!state->ggtt_vma)
1957 			continue;
1958 
1959 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
1960 			fb = state->hw.fb;
1961 			vma = state->ggtt_vma;
1962 			goto valid_fb;
1963 		}
1964 	}
1965 
1966 	/*
1967 	 * We've failed to reconstruct the BIOS FB.  Current display state
1968 	 * indicates that the primary plane is visible, but has a NULL FB,
1969 	 * which will lead to problems later if we don't fix it up.  The
1970 	 * simplest solution is to just disable the primary plane now and
1971 	 * pretend the BIOS never had it enabled.
1972 	 */
1973 	intel_plane_disable_noatomic(intel_crtc, intel_plane);
1974 	if (crtc_state->bigjoiner) {
1975 		struct intel_crtc *slave =
1976 			crtc_state->bigjoiner_linked_crtc;
1977 		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
1978 	}
1979 
1980 	return;
1981 
1982 valid_fb:
1983 	plane_state->rotation = plane_config->rotation;
1984 	intel_fb_fill_view(to_intel_framebuffer(fb), plane_state->rotation,
1985 			   &intel_state->view);
1986 
1987 	__i915_vma_pin(vma);
1988 	intel_state->ggtt_vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) &&
	    i915_vma_pin_fence(vma) == 0 && vma->fence)
		intel_state->flags |= PLANE_HAS_FENCE;
1992 
1993 	plane_state->src_x = 0;
1994 	plane_state->src_y = 0;
1995 	plane_state->src_w = fb->width << 16;
1996 	plane_state->src_h = fb->height << 16;
1997 
1998 	plane_state->crtc_x = 0;
1999 	plane_state->crtc_y = 0;
2000 	plane_state->crtc_w = fb->width;
2001 	plane_state->crtc_h = fb->height;
2002 
2003 	if (plane_config->tiling)
2004 		dev_priv->preserve_bios_swizzle = true;
2005 
2006 	plane_state->fb = fb;
2007 	drm_framebuffer_get(fb);
2008 
2009 	plane_state->crtc = &intel_crtc->base;
2010 	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
2011 					  intel_crtc);
2012 
2013 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
2014 
2015 	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
2016 		  &to_intel_frontbuffer(fb)->bits);
2017 }
2018 
2019 unsigned int
2020 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
2021 {
2022 	int x = 0, y = 0;
2023 
2024 	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
2025 					  plane_state->view.color_plane[0].offset, 0);
2026 
2027 	return y;
2028 }
2029 
2030 static int
2031 __intel_display_resume(struct drm_device *dev,
2032 		       struct drm_atomic_state *state,
2033 		       struct drm_modeset_acquire_ctx *ctx)
2034 {
2035 	struct drm_crtc_state *crtc_state;
2036 	struct drm_crtc *crtc;
2037 	int i, ret;
2038 
2039 	intel_modeset_setup_hw_state(dev, ctx);
2040 	intel_vga_redisable(to_i915(dev));
2041 
2042 	if (!state)
2043 		return 0;
2044 
2045 	/*
	 * We've duplicated the state; pointers to the old state are invalid.
2047 	 *
2048 	 * Don't attempt to use the old state until we commit the duplicated state.
2049 	 */
2050 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2051 		/*
2052 		 * Force recalculation even if we restore
2053 		 * current state. With fast modeset this may not result
2054 		 * in a modeset when the state is compatible.
2055 		 */
2056 		crtc_state->mode_changed = true;
2057 	}
2058 
2059 	/* ignore any reset values/BIOS leftovers in the WM registers */
2060 	if (!HAS_GMCH(to_i915(dev)))
2061 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
2062 
2063 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
2064 
2065 	drm_WARN_ON(dev, ret == -EDEADLK);
2066 	return ret;
2067 }
2068 
2069 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
2070 {
2071 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
2072 		intel_has_gpu_reset(&dev_priv->gt));
2073 }
2074 
2075 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
2076 {
2077 	struct drm_device *dev = &dev_priv->drm;
2078 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2079 	struct drm_atomic_state *state;
2080 	int ret;
2081 
2082 	if (!HAS_DISPLAY(dev_priv))
2083 		return;
2084 
2085 	/* reset doesn't touch the display */
2086 	if (!dev_priv->params.force_reset_modeset_test &&
2087 	    !gpu_reset_clobbers_display(dev_priv))
2088 		return;
2089 
2090 	/* We have a modeset vs reset deadlock, defensively unbreak it. */
2091 	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
2092 	smp_mb__after_atomic();
2093 	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
2094 
2095 	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
2096 		drm_dbg_kms(&dev_priv->drm,
2097 			    "Modeset potentially stuck, unbreaking through wedging\n");
2098 		intel_gt_set_wedged(&dev_priv->gt);
2099 	}
2100 
2101 	/*
2102 	 * Need mode_config.mutex so that we don't
2103 	 * trample ongoing ->detect() and whatnot.
2104 	 */
2105 	mutex_lock(&dev->mode_config.mutex);
2106 	drm_modeset_acquire_init(ctx, 0);
2107 	while (1) {
2108 		ret = drm_modeset_lock_all_ctx(dev, ctx);
2109 		if (ret != -EDEADLK)
2110 			break;
2111 
2112 		drm_modeset_backoff(ctx);
2113 	}
2114 	/*
2115 	 * Disabling the crtcs gracefully seems nicer. Also the
2116 	 * g33 docs say we should at least disable all the planes.
2117 	 */
2118 	state = drm_atomic_helper_duplicate_state(dev, ctx);
2119 	if (IS_ERR(state)) {
2120 		ret = PTR_ERR(state);
2121 		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
2122 			ret);
2123 		return;
2124 	}
2125 
2126 	ret = drm_atomic_helper_disable_all(dev, ctx);
2127 	if (ret) {
		drm_err(&dev_priv->drm, "Suspending CRTCs failed with %i\n",
2129 			ret);
2130 		drm_atomic_state_put(state);
2131 		return;
2132 	}
2133 
2134 	dev_priv->modeset_restore_state = state;
2135 	state->acquire_ctx = ctx;
2136 }
2137 
2138 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
2139 {
2140 	struct drm_device *dev = &dev_priv->drm;
2141 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2142 	struct drm_atomic_state *state;
2143 	int ret;
2144 
2145 	if (!HAS_DISPLAY(dev_priv))
2146 		return;
2147 
2148 	/* reset doesn't touch the display */
2149 	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
2150 		return;
2151 
2152 	state = fetch_and_zero(&dev_priv->modeset_restore_state);
2153 	if (!state)
2154 		goto unlock;
2155 
2156 	/* reset doesn't touch the display */
2157 	if (!gpu_reset_clobbers_display(dev_priv)) {
2158 		/* for testing only restore the display */
2159 		ret = __intel_display_resume(dev, state, ctx);
2160 		if (ret)
2161 			drm_err(&dev_priv->drm,
2162 				"Restoring old state failed with %i\n", ret);
2163 	} else {
2164 		/*
2165 		 * The display has been reset as well,
2166 		 * so need a full re-initialization.
2167 		 */
2168 		intel_pps_unlock_regs_wa(dev_priv);
2169 		intel_modeset_init_hw(dev_priv);
2170 		intel_init_clock_gating(dev_priv);
2171 		intel_hpd_init(dev_priv);
2172 
2173 		ret = __intel_display_resume(dev, state, ctx);
2174 		if (ret)
2175 			drm_err(&dev_priv->drm,
2176 				"Restoring old state failed with %i\n", ret);
2177 
2178 		intel_hpd_poll_disable(dev_priv);
2179 	}
2180 
2181 	drm_atomic_state_put(state);
2182 unlock:
2183 	drm_modeset_drop_locks(ctx);
2184 	drm_modeset_acquire_fini(ctx);
2185 	mutex_unlock(&dev->mode_config.mutex);
2186 
2187 	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
2188 }
2189 
2190 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
2191 {
2192 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2193 	enum pipe pipe = crtc->pipe;
2194 	u32 tmp;
2195 
2196 	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
2197 
2198 	/*
2199 	 * Display WA #1153: icl
2200 	 * enable hardware to bypass the alpha math
2201 	 * and rounding for per-pixel values 00 and 0xff
2202 	 */
2203 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
2204 	/*
2205 	 * Display WA # 1605353570: icl
2206 	 * Set the pixel rounding bit to 1 for allowing
2207 	 * passthrough of Frame buffer pixels unmodified
2208 	 * across pipe
2209 	 */
2210 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
2211 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
2212 }
2213 
2214 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
2215 {
2216 	struct drm_crtc *crtc;
2217 	bool cleanup_done;
2218 
2219 	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
2222 		commit = list_first_entry_or_null(&crtc->commit_list,
2223 						  struct drm_crtc_commit, commit_entry);
2224 		cleanup_done = commit ?
2225 			try_wait_for_completion(&commit->cleanup_done) : true;
2226 		spin_unlock(&crtc->commit_lock);
2227 
2228 		if (cleanup_done)
2229 			continue;
2230 
2231 		drm_crtc_wait_one_vblank(crtc);
2232 
2233 		return true;
2234 	}
2235 
2236 	return false;
2237 }
2238 
2239 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
2240 {
2241 	u32 temp;
2242 
2243 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
2244 
2245 	mutex_lock(&dev_priv->sb_lock);
2246 
2247 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2248 	temp |= SBI_SSCCTL_DISABLE;
2249 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2250 
2251 	mutex_unlock(&dev_priv->sb_lock);
2252 }
2253 
2254 /* Program iCLKIP clock to the desired frequency */
2255 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
2256 {
2257 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2258 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2259 	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
2260 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
2261 	u32 temp;
2262 
2263 	lpt_disable_iclkip(dev_priv);
2264 
2265 	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
2267 	 * divisors, it is necessary to divide one by another, so we
2268 	 * convert the virtual clock precision to KHz here for higher
2269 	 * precision.
2270 	 */
2271 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
2272 		u32 iclk_virtual_root_freq = 172800 * 1000;
2273 		u32 iclk_pi_range = 64;
2274 		u32 desired_divisor;
2275 
2276 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2277 						    clock << auxdiv);
2278 		divsel = (desired_divisor / iclk_pi_range) - 2;
2279 		phaseinc = desired_divisor % iclk_pi_range;
2280 
2281 		/*
2282 		 * Near 20MHz is a corner case which is
2283 		 * out of range for the 7-bit divisor
2284 		 */
2285 		if (divsel <= 0x7f)
2286 			break;
2287 	}
2288 
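	/*
	 * Worked example with an assumed 148500 KHz pixel clock and
	 * auxdiv = 0: desired_divisor = DIV_ROUND_CLOSEST(172800000,
	 * 148500) = 1164, so divsel = 1164 / 64 - 2 = 16 and
	 * phaseinc = 1164 % 64 = 12, well within the 7-bit divisor range.
	 */
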
2289 	/* This should not happen with any sane values */
2290 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2291 		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2292 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
2293 		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2294 
2295 	drm_dbg_kms(&dev_priv->drm,
2296 		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2297 		    clock, auxdiv, divsel, phasedir, phaseinc);
2298 
2299 	mutex_lock(&dev_priv->sb_lock);
2300 
2301 	/* Program SSCDIVINTPHASE6 */
2302 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2303 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2304 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2305 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2306 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2307 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2308 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2309 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2310 
2311 	/* Program SSCAUXDIV */
2312 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2313 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2314 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2315 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
2316 
2317 	/* Enable modulator and associated divider */
2318 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2319 	temp &= ~SBI_SSCCTL_DISABLE;
2320 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2321 
2322 	mutex_unlock(&dev_priv->sb_lock);
2323 
2324 	/* Wait for initialization time */
2325 	udelay(24);
2326 
2327 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2328 }
2329 
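/*
 * Read back the current iCLKIP frequency in KHz. This is the inverse of
 * the divisor math in lpt_program_iclkip(): continuing the example above,
 * divsel = 16, phaseinc = 12 and auxdiv = 0 yield desired_divisor =
 * (16 + 2) * 64 + 12 = 1164, and thus roughly the original 148500 KHz.
 */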
2330 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
2331 {
2332 	u32 divsel, phaseinc, auxdiv;
2333 	u32 iclk_virtual_root_freq = 172800 * 1000;
2334 	u32 iclk_pi_range = 64;
2335 	u32 desired_divisor;
2336 	u32 temp;
2337 
2338 	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
2339 		return 0;
2340 
2341 	mutex_lock(&dev_priv->sb_lock);
2342 
2343 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2344 	if (temp & SBI_SSCCTL_DISABLE) {
2345 		mutex_unlock(&dev_priv->sb_lock);
2346 		return 0;
2347 	}
2348 
2349 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2350 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
2351 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
2352 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
2353 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
2354 
2355 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2356 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
2357 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
2358 
2359 	mutex_unlock(&dev_priv->sb_lock);
2360 
2361 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
2362 
2363 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2364 				 desired_divisor << auxdiv);
2365 }
2366 
2367 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
2368 					   enum pipe pch_transcoder)
2369 {
2370 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2371 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2372 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2373 
2374 	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
2375 		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
2376 	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
2377 		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
2378 	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
2379 		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
2380 
2381 	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
2382 		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2383 	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
2384 		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
2385 	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
2386 		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
2387 	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2388 		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
2389 }
2390 
2391 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
2392 {
2393 	u32 temp;
2394 
2395 	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
2396 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
2397 		return;
2398 
2399 	drm_WARN_ON(&dev_priv->drm,
2400 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
2401 		    FDI_RX_ENABLE);
2402 	drm_WARN_ON(&dev_priv->drm,
2403 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
2404 		    FDI_RX_ENABLE);
2405 
2406 	temp &= ~FDI_BC_BIFURCATION_SELECT;
2407 	if (enable)
2408 		temp |= FDI_BC_BIFURCATION_SELECT;
2409 
2410 	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
2411 		    enable ? "en" : "dis");
2412 	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
2413 	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
2414 }
2415 
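/*
 * On IVB the FDI B and C links share four lanes: pipe B may either take
 * all four (no bifurcation, FDI C unusable) or the link is bifurcated
 * into 2+2 lanes for pipes B and C. Pipe C therefore always needs the
 * bifurcation enabled, while pipe B forces it off when it needs more
 * than two lanes.
 */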
2416 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2417 {
2418 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2419 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2420 
2421 	switch (crtc->pipe) {
2422 	case PIPE_A:
2423 		break;
2424 	case PIPE_B:
2425 		if (crtc_state->fdi_lanes > 2)
2426 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
2427 		else
2428 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
2429 
2430 		break;
2431 	case PIPE_C:
2432 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
2433 
2434 		break;
2435 	default:
2436 		BUG();
2437 	}
2438 }
2439 
2440 /*
2441  * Finds the encoder associated with the given CRTC. This can only be
2442  * used when we know that the CRTC isn't feeding multiple encoders!
2443  */
2444 struct intel_encoder *
2445 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2446 			   const struct intel_crtc_state *crtc_state)
2447 {
2448 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2449 	const struct drm_connector_state *connector_state;
2450 	const struct drm_connector *connector;
2451 	struct intel_encoder *encoder = NULL;
2452 	int num_encoders = 0;
2453 	int i;
2454 
2455 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2456 		if (connector_state->crtc != &crtc->base)
2457 			continue;
2458 
2459 		encoder = to_intel_encoder(connector_state->best_encoder);
2460 		num_encoders++;
2461 	}
2462 
2463 	drm_WARN(encoder->base.dev, num_encoders != 1,
2464 		 "%d encoders for pipe %c\n",
2465 		 num_encoders, pipe_name(crtc->pipe));
2466 
2467 	return encoder;
2468 }
2469 
2470 /*
2471  * Enable PCH resources required for PCH ports:
2472  *   - PCH PLLs
2473  *   - FDI training & RX/TX
2474  *   - update transcoder timings
2475  *   - DP transcoding bits
2476  *   - transcoder
2477  */
2478 static void ilk_pch_enable(const struct intel_atomic_state *state,
2479 			   const struct intel_crtc_state *crtc_state)
2480 {
2481 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2482 	struct drm_device *dev = crtc->base.dev;
2483 	struct drm_i915_private *dev_priv = to_i915(dev);
2484 	enum pipe pipe = crtc->pipe;
2485 	u32 temp;
2486 
2487 	assert_pch_transcoder_disabled(dev_priv, pipe);
2488 
2489 	if (IS_IVYBRIDGE(dev_priv))
2490 		ivb_update_fdi_bc_bifurcation(crtc_state);
2491 
2492 	/* Write the TU size bits before fdi link training, so that error
2493 	 * detection works. */
2494 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2495 		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2496 
2497 	/* For PCH output, training FDI link */
2498 	dev_priv->display.fdi_link_train(crtc, crtc_state);
2499 
2500 	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
2502 	if (HAS_PCH_CPT(dev_priv)) {
2503 		u32 sel;
2504 
2505 		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
2506 		temp |= TRANS_DPLL_ENABLE(pipe);
2507 		sel = TRANS_DPLLB_SEL(pipe);
2508 		if (crtc_state->shared_dpll ==
2509 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
2510 			temp |= sel;
2511 		else
2512 			temp &= ~sel;
2513 		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
2514 	}
2515 
	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
2517 	 * transcoder, and we actually should do this to not upset any PCH
2518 	 * transcoder that already use the clock when we share it.
2519 	 *
2520 	 * Note that enable_shared_dpll tries to do the right thing, but
2521 	 * get_shared_dpll unconditionally resets the pll - we need that to have
2522 	 * the right LVDS enable sequence. */
2523 	intel_enable_shared_dpll(crtc_state);
2524 
2525 	/* set transcoder timing, panel must allow it */
2526 	assert_panel_unlocked(dev_priv, pipe);
2527 	ilk_pch_transcoder_set_timings(crtc_state, pipe);
2528 
2529 	intel_fdi_normal_train(crtc);
2530 
2531 	/* For PCH DP, enable TRANS_DP_CTL */
2532 	if (HAS_PCH_CPT(dev_priv) &&
2533 	    intel_crtc_has_dp_encoder(crtc_state)) {
2534 		const struct drm_display_mode *adjusted_mode =
2535 			&crtc_state->hw.adjusted_mode;
2536 		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2537 		i915_reg_t reg = TRANS_DP_CTL(pipe);
2538 		enum port port;
2539 
2540 		temp = intel_de_read(dev_priv, reg);
2541 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
2542 			  TRANS_DP_SYNC_MASK |
2543 			  TRANS_DP_BPC_MASK);
2544 		temp |= TRANS_DP_OUTPUT_ENABLE;
2545 		temp |= bpc << 9; /* same format but at 11:9 */
2546 
2547 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2548 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2549 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2550 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2551 
2552 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
2553 		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
2554 		temp |= TRANS_DP_PORT_SEL(port);
2555 
2556 		intel_de_write(dev_priv, reg, temp);
2557 	}
2558 
2559 	ilk_enable_pch_transcoder(crtc_state);
2560 }
2561 
2562 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
2563 {
2564 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2565 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2566 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2567 
2568 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
2569 
2570 	lpt_program_iclkip(crtc_state);
2571 
2572 	/* Set transcoder timing. */
2573 	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
2574 
2575 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
2576 }
2577 
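/*
 * Sanity check that the pipe actually started running after the mode set,
 * by waiting for the scanline counter (PIPEDSL) to advance; a counter that
 * never moves means the pipe is stuck.
 */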
2578 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
2579 			       enum pipe pipe)
2580 {
2581 	i915_reg_t dslreg = PIPEDSL(pipe);
2582 	u32 temp;
2583 
2584 	temp = intel_de_read(dev_priv, dslreg);
2585 	udelay(500);
2586 	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
2587 		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
2588 			drm_err(&dev_priv->drm,
2589 				"mode set failed: pipe %c stuck\n",
2590 				pipe_name(pipe));
2591 	}
2592 }
2593 
2594 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
2595 {
2596 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2597 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2598 	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
2599 	enum pipe pipe = crtc->pipe;
2600 	int width = drm_rect_width(dst);
2601 	int height = drm_rect_height(dst);
2602 	int x = dst->x1;
2603 	int y = dst->y1;
2604 
2605 	if (!crtc_state->pch_pfit.enabled)
2606 		return;
2607 
2608 	/* Force use of hard-coded filter coefficients
2609 	 * as some pre-programmed values are broken,
2610 	 * e.g. x201.
2611 	 */
2612 	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
2613 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2614 			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
2615 	else
2616 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2617 			       PF_FILTER_MED_3x3);
2618 	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
2619 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
2620 }
2621 
2622 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
2623 {
2624 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2625 	struct drm_device *dev = crtc->base.dev;
2626 	struct drm_i915_private *dev_priv = to_i915(dev);
2627 
2628 	if (!crtc_state->ips_enabled)
2629 		return;
2630 
2631 	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank.
2633 	 * This function is called from post_plane_update, which is run after
2634 	 * a vblank wait.
2635 	 */
2636 	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
2637 
2638 	if (IS_BROADWELL(dev_priv)) {
2639 		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
2640 							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "it's not safe to expect any particular
2642 		 * value in IPS_CTL bit 31 after enabling IPS through the
2643 		 * mailbox." Moreover, the mailbox may return a bogus state,
2644 		 * so we need to just enable it and continue on.
2645 		 */
2646 	} else {
2647 		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
2648 		/* The bit only becomes 1 in the next vblank, so this wait here
2649 		 * is essentially intel_wait_for_vblank. If we don't have this
2650 		 * and don't wait for vblanks until the end of crtc_enable, then
2651 		 * the HW state readout code will complain that the expected
2652 		 * IPS_CTL value is not the one we read. */
2653 		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
2654 			drm_err(&dev_priv->drm,
2655 				"Timed out waiting for IPS enable\n");
2656 	}
2657 }
2658 
2659 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
2660 {
2661 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2662 	struct drm_device *dev = crtc->base.dev;
2663 	struct drm_i915_private *dev_priv = to_i915(dev);
2664 
2665 	if (!crtc_state->ips_enabled)
2666 		return;
2667 
2668 	if (IS_BROADWELL(dev_priv)) {
2669 		drm_WARN_ON(dev,
2670 			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
2671 		/*
2672 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
2673 		 * 42ms timeout value leads to occasional timeouts so use 100ms
2674 		 * instead.
2675 		 */
2676 		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
2677 			drm_err(&dev_priv->drm,
2678 				"Timed out waiting for IPS disable\n");
2679 	} else {
2680 		intel_de_write(dev_priv, IPS_CTL, 0);
2681 		intel_de_posting_read(dev_priv, IPS_CTL);
2682 	}
2683 
2684 	/* We need to wait for a vblank before we can disable the plane. */
2685 	intel_wait_for_vblank(dev_priv, crtc->pipe);
2686 }
2687 
2688 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
2689 {
2690 	if (intel_crtc->overlay)
2691 		(void) intel_overlay_switch_off(intel_crtc->overlay);
2692 
2693 	/* Let userspace switch the overlay on again. In most cases userspace
2694 	 * has to recompute where to put it anyway.
2695 	 */
2696 }
2697 
2698 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
2699 				       const struct intel_crtc_state *new_crtc_state)
2700 {
2701 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2702 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2703 
2704 	if (!old_crtc_state->ips_enabled)
2705 		return false;
2706 
2707 	if (intel_crtc_needs_modeset(new_crtc_state))
2708 		return true;
2709 
2710 	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
2712 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2713 	 *
2714 	 * Disable IPS before we program the LUT.
2715 	 */
2716 	if (IS_HASWELL(dev_priv) &&
2717 	    (new_crtc_state->uapi.color_mgmt_changed ||
2718 	     new_crtc_state->update_pipe) &&
2719 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2720 		return true;
2721 
2722 	return !new_crtc_state->ips_enabled;
2723 }
2724 
2725 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
2726 				       const struct intel_crtc_state *new_crtc_state)
2727 {
2728 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2729 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2730 
2731 	if (!new_crtc_state->ips_enabled)
2732 		return false;
2733 
2734 	if (intel_crtc_needs_modeset(new_crtc_state))
2735 		return true;
2736 
2737 	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
2739 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2740 	 *
2741 	 * Re-enable IPS after the LUT has been programmed.
2742 	 */
2743 	if (IS_HASWELL(dev_priv) &&
2744 	    (new_crtc_state->uapi.color_mgmt_changed ||
2745 	     new_crtc_state->update_pipe) &&
2746 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2747 		return true;
2748 
2749 	/*
2750 	 * We can't read out IPS on broadwell, assume the worst and
2751 	 * forcibly enable IPS on the first fastset.
2752 	 */
2753 	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
2754 		return true;
2755 
2756 	return !old_crtc_state->ips_enabled;
2757 }
2758 
2759 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
2760 {
2761 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2762 
2763 	if (!crtc_state->nv12_planes)
2764 		return false;
2765 
2766 	/* WA Display #0827: Gen9:all */
2767 	if (DISPLAY_VER(dev_priv) == 9)
2768 		return true;
2769 
2770 	return false;
2771 }
2772 
2773 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
2774 {
2775 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2776 
2777 	/* Wa_2006604312:icl,ehl */
2778 	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
2779 		return true;
2780 
2781 	return false;
2782 }
2783 
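/*
 * Planes count as "enabling" when the new state has active planes and the
 * old state had none (or a full modeset is performed); planes_disabling()
 * below is the mirror image.
 */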
2784 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
2785 			    const struct intel_crtc_state *new_crtc_state)
2786 {
2787 	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
2788 		new_crtc_state->active_planes;
2789 }
2790 
2791 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
2792 			     const struct intel_crtc_state *new_crtc_state)
2793 {
2794 	return old_crtc_state->active_planes &&
2795 		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
2796 }
2797 
2798 static void intel_post_plane_update(struct intel_atomic_state *state,
2799 				    struct intel_crtc *crtc)
2800 {
2801 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2802 	const struct intel_crtc_state *old_crtc_state =
2803 		intel_atomic_get_old_crtc_state(state, crtc);
2804 	const struct intel_crtc_state *new_crtc_state =
2805 		intel_atomic_get_new_crtc_state(state, crtc);
2806 	enum pipe pipe = crtc->pipe;
2807 
2808 	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
2809 
2810 	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
2811 		intel_update_watermarks(crtc);
2812 
2813 	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
2814 		hsw_enable_ips(new_crtc_state);
2815 
2816 	intel_fbc_post_update(state, crtc);
2817 
2818 	if (needs_nv12_wa(old_crtc_state) &&
2819 	    !needs_nv12_wa(new_crtc_state))
2820 		skl_wa_827(dev_priv, pipe, false);
2821 
2822 	if (needs_scalerclk_wa(old_crtc_state) &&
2823 	    !needs_scalerclk_wa(new_crtc_state))
2824 		icl_wa_scalerclkgating(dev_priv, pipe, false);
2825 }
2826 
2827 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
2828 					struct intel_crtc *crtc)
2829 {
2830 	const struct intel_crtc_state *crtc_state =
2831 		intel_atomic_get_new_crtc_state(state, crtc);
2832 	u8 update_planes = crtc_state->update_planes;
2833 	const struct intel_plane_state *plane_state;
2834 	struct intel_plane *plane;
2835 	int i;
2836 
2837 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2838 		if (plane->enable_flip_done &&
2839 		    plane->pipe == crtc->pipe &&
2840 		    update_planes & BIT(plane->id))
2841 			plane->enable_flip_done(plane);
2842 	}
2843 }
2844 
2845 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
2846 					 struct intel_crtc *crtc)
2847 {
2848 	const struct intel_crtc_state *crtc_state =
2849 		intel_atomic_get_new_crtc_state(state, crtc);
2850 	u8 update_planes = crtc_state->update_planes;
2851 	const struct intel_plane_state *plane_state;
2852 	struct intel_plane *plane;
2853 	int i;
2854 
2855 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2856 		if (plane->disable_flip_done &&
2857 		    plane->pipe == crtc->pipe &&
2858 		    update_planes & BIT(plane->id))
2859 			plane->disable_flip_done(plane);
2860 	}
2861 }
2862 
2863 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
2864 					     struct intel_crtc *crtc)
2865 {
2866 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2867 	const struct intel_crtc_state *old_crtc_state =
2868 		intel_atomic_get_old_crtc_state(state, crtc);
2869 	const struct intel_crtc_state *new_crtc_state =
2870 		intel_atomic_get_new_crtc_state(state, crtc);
2871 	u8 update_planes = new_crtc_state->update_planes;
2872 	const struct intel_plane_state *old_plane_state;
2873 	struct intel_plane *plane;
2874 	bool need_vbl_wait = false;
2875 	int i;
2876 
2877 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2878 		if (plane->need_async_flip_disable_wa &&
2879 		    plane->pipe == crtc->pipe &&
2880 		    update_planes & BIT(plane->id)) {
2881 			/*
2882 			 * Apart from the async flip bit we want to
2883 			 * preserve the old state for the plane.
2884 			 */
2885 			plane->async_flip(plane, old_crtc_state,
2886 					  old_plane_state, false);
2887 			need_vbl_wait = true;
2888 		}
2889 	}
2890 
2891 	if (need_vbl_wait)
2892 		intel_wait_for_vblank(i915, crtc->pipe);
2893 }
2894 
2895 static void intel_pre_plane_update(struct intel_atomic_state *state,
2896 				   struct intel_crtc *crtc)
2897 {
2898 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2899 	const struct intel_crtc_state *old_crtc_state =
2900 		intel_atomic_get_old_crtc_state(state, crtc);
2901 	const struct intel_crtc_state *new_crtc_state =
2902 		intel_atomic_get_new_crtc_state(state, crtc);
2903 	enum pipe pipe = crtc->pipe;
2904 
2905 	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
2906 		hsw_disable_ips(old_crtc_state);
2907 
2908 	if (intel_fbc_pre_update(state, crtc))
2909 		intel_wait_for_vblank(dev_priv, pipe);
2910 
2911 	/* Display WA 827 */
2912 	if (!needs_nv12_wa(old_crtc_state) &&
2913 	    needs_nv12_wa(new_crtc_state))
2914 		skl_wa_827(dev_priv, pipe, true);
2915 
2916 	/* Wa_2006604312:icl,ehl */
2917 	if (!needs_scalerclk_wa(old_crtc_state) &&
2918 	    needs_scalerclk_wa(new_crtc_state))
2919 		icl_wa_scalerclkgating(dev_priv, pipe, true);
2920 
2921 	/*
2922 	 * Vblank time updates from the shadow to live plane control register
2923 	 * are blocked if the memory self-refresh mode is active at that
2924 	 * moment. So to make sure the plane gets truly disabled, disable
2925 	 * first the self-refresh mode. The self-refresh enable bit in turn
2926 	 * will be checked/applied by the HW only at the next frame start
2927 	 * event which is after the vblank start event, so we need to have a
2928 	 * wait-for-vblank between disabling the plane and the pipe.
2929 	 */
2930 	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
2931 	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
2932 		intel_wait_for_vblank(dev_priv, pipe);
2933 
2934 	/*
2935 	 * IVB workaround: must disable low power watermarks for at least
2936 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
2937 	 * when scaling is disabled.
2938 	 *
2939 	 * WaCxSRDisabledForSpriteScaling:ivb
2940 	 */
2941 	if (old_crtc_state->hw.active &&
2942 	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
2943 		intel_wait_for_vblank(dev_priv, pipe);
2944 
2945 	/*
2946 	 * If we're doing a modeset we don't need to do any
2947 	 * pre-vblank watermark programming here.
2948 	 */
2949 	if (!intel_crtc_needs_modeset(new_crtc_state)) {
2950 		/*
2951 		 * For platforms that support atomic watermarks, program the
2952 		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
2953 		 * will be the intermediate values that are safe for both pre- and
2954 		 * post- vblank; when vblank happens, the 'active' values will be set
2955 		 * to the final 'target' values and we'll do this again to get the
2956 		 * optimal watermarks.  For gen9+ platforms, the values we program here
2957 		 * will be the final target values which will get automatically latched
2958 		 * at vblank time; no further programming will be necessary.
2959 		 *
2960 		 * If a platform hasn't been transitioned to atomic watermarks yet,
2961 		 * we'll continue to update watermarks the old way, if flags tell
2962 		 * us to.
2963 		 */
2964 		if (dev_priv->display.initial_watermarks)
2965 			dev_priv->display.initial_watermarks(state, crtc);
2966 		else if (new_crtc_state->update_wm_pre)
2967 			intel_update_watermarks(crtc);
2968 	}
2969 
2970 	/*
2971 	 * Gen2 reports pipe underruns whenever all planes are disabled.
2972 	 * So disable underrun reporting before all the planes get disabled.
2973 	 *
2974 	 * We do this after .initial_watermarks() so that we have a
2975 	 * chance of catching underruns with the intermediate watermarks
2976 	 * vs. the old plane configuration.
2977 	 */
2978 	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
2979 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2980 
2981 	/*
2982 	 * WA for platforms where async address update enable bit
2983 	 * is double buffered and only latched at start of vblank.
2984 	 */
2985 	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
2986 		intel_crtc_async_flip_disable_wa(state, crtc);
2987 }
2988 
2989 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
2990 				      struct intel_crtc *crtc)
2991 {
2992 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2993 	const struct intel_crtc_state *new_crtc_state =
2994 		intel_atomic_get_new_crtc_state(state, crtc);
2995 	unsigned int update_mask = new_crtc_state->update_planes;
2996 	const struct intel_plane_state *old_plane_state;
2997 	struct intel_plane *plane;
	unsigned int fb_bits = 0;
2999 	int i;
3000 
3001 	intel_crtc_dpms_overlay_disable(crtc);
3002 
3003 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
3004 		if (crtc->pipe != plane->pipe ||
3005 		    !(update_mask & BIT(plane->id)))
3006 			continue;
3007 
3008 		intel_disable_plane(plane, new_crtc_state);
3009 
3010 		if (old_plane_state->uapi.visible)
3011 			fb_bits |= plane->frontbuffer_bit;
3012 	}
3013 
3014 	intel_frontbuffer_flip(dev_priv, fb_bits);
3015 }
3016 
3017 /*
3018  * intel_connector_primary_encoder - get the primary encoder for a connector
3019  * @connector: connector for which to return the encoder
3020  *
3021  * Returns the primary encoder for a connector. There is a 1:1 mapping from
3022  * all connectors to their encoder, except for DP-MST connectors which have
3023  * both a virtual and a primary encoder. These DP-MST primary encoders can be
3024  * pointed to by as many DP-MST connectors as there are pipes.
3025  */
3026 static struct intel_encoder *
3027 intel_connector_primary_encoder(struct intel_connector *connector)
3028 {
3029 	struct intel_encoder *encoder;
3030 
3031 	if (connector->mst_port)
3032 		return &dp_to_dig_port(connector->mst_port)->base;
3033 
3034 	encoder = intel_attached_encoder(connector);
3035 	drm_WARN_ON(connector->base.dev, !encoder);
3036 
3037 	return encoder;
3038 }
3039 
3040 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
3041 {
3042 	struct drm_connector_state *new_conn_state;
3043 	struct drm_connector *connector;
3044 	int i;
3045 
3046 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3047 					i) {
3048 		struct intel_connector *intel_connector;
3049 		struct intel_encoder *encoder;
3050 		struct intel_crtc *crtc;
3051 
3052 		if (!intel_connector_needs_modeset(state, connector))
3053 			continue;
3054 
3055 		intel_connector = to_intel_connector(connector);
3056 		encoder = intel_connector_primary_encoder(intel_connector);
3057 		if (!encoder->update_prepare)
3058 			continue;
3059 
3060 		crtc = new_conn_state->crtc ?
3061 			to_intel_crtc(new_conn_state->crtc) : NULL;
3062 		encoder->update_prepare(state, encoder, crtc);
3063 	}
3064 }
3065 
3066 static void intel_encoders_update_complete(struct intel_atomic_state *state)
3067 {
3068 	struct drm_connector_state *new_conn_state;
3069 	struct drm_connector *connector;
3070 	int i;
3071 
3072 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3073 					i) {
3074 		struct intel_connector *intel_connector;
3075 		struct intel_encoder *encoder;
3076 		struct intel_crtc *crtc;
3077 
3078 		if (!intel_connector_needs_modeset(state, connector))
3079 			continue;
3080 
3081 		intel_connector = to_intel_connector(connector);
3082 		encoder = intel_connector_primary_encoder(intel_connector);
3083 		if (!encoder->update_complete)
3084 			continue;
3085 
3086 		crtc = new_conn_state->crtc ?
3087 			to_intel_crtc(new_conn_state->crtc) : NULL;
3088 		encoder->update_complete(state, encoder, crtc);
3089 	}
3090 }
3091 
3092 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
3093 					  struct intel_crtc *crtc)
3094 {
3095 	const struct intel_crtc_state *crtc_state =
3096 		intel_atomic_get_new_crtc_state(state, crtc);
3097 	const struct drm_connector_state *conn_state;
3098 	struct drm_connector *conn;
3099 	int i;
3100 
3101 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3102 		struct intel_encoder *encoder =
3103 			to_intel_encoder(conn_state->best_encoder);
3104 
3105 		if (conn_state->crtc != &crtc->base)
3106 			continue;
3107 
3108 		if (encoder->pre_pll_enable)
3109 			encoder->pre_pll_enable(state, encoder,
3110 						crtc_state, conn_state);
3111 	}
3112 }
3113 
3114 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
3115 				      struct intel_crtc *crtc)
3116 {
3117 	const struct intel_crtc_state *crtc_state =
3118 		intel_atomic_get_new_crtc_state(state, crtc);
3119 	const struct drm_connector_state *conn_state;
3120 	struct drm_connector *conn;
3121 	int i;
3122 
3123 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3124 		struct intel_encoder *encoder =
3125 			to_intel_encoder(conn_state->best_encoder);
3126 
3127 		if (conn_state->crtc != &crtc->base)
3128 			continue;
3129 
3130 		if (encoder->pre_enable)
3131 			encoder->pre_enable(state, encoder,
3132 					    crtc_state, conn_state);
3133 	}
3134 }
3135 
3136 static void intel_encoders_enable(struct intel_atomic_state *state,
3137 				  struct intel_crtc *crtc)
3138 {
3139 	const struct intel_crtc_state *crtc_state =
3140 		intel_atomic_get_new_crtc_state(state, crtc);
3141 	const struct drm_connector_state *conn_state;
3142 	struct drm_connector *conn;
3143 	int i;
3144 
3145 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3146 		struct intel_encoder *encoder =
3147 			to_intel_encoder(conn_state->best_encoder);
3148 
3149 		if (conn_state->crtc != &crtc->base)
3150 			continue;
3151 
3152 		if (encoder->enable)
3153 			encoder->enable(state, encoder,
3154 					crtc_state, conn_state);
3155 		intel_opregion_notify_encoder(encoder, true);
3156 	}
3157 }
3158 
3159 static void intel_encoders_disable(struct intel_atomic_state *state,
3160 				   struct intel_crtc *crtc)
3161 {
3162 	const struct intel_crtc_state *old_crtc_state =
3163 		intel_atomic_get_old_crtc_state(state, crtc);
3164 	const struct drm_connector_state *old_conn_state;
3165 	struct drm_connector *conn;
3166 	int i;
3167 
3168 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3169 		struct intel_encoder *encoder =
3170 			to_intel_encoder(old_conn_state->best_encoder);
3171 
3172 		if (old_conn_state->crtc != &crtc->base)
3173 			continue;
3174 
3175 		intel_opregion_notify_encoder(encoder, false);
3176 		if (encoder->disable)
3177 			encoder->disable(state, encoder,
3178 					 old_crtc_state, old_conn_state);
3179 	}
3180 }
3181 
3182 static void intel_encoders_post_disable(struct intel_atomic_state *state,
3183 					struct intel_crtc *crtc)
3184 {
3185 	const struct intel_crtc_state *old_crtc_state =
3186 		intel_atomic_get_old_crtc_state(state, crtc);
3187 	const struct drm_connector_state *old_conn_state;
3188 	struct drm_connector *conn;
3189 	int i;
3190 
3191 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3192 		struct intel_encoder *encoder =
3193 			to_intel_encoder(old_conn_state->best_encoder);
3194 
3195 		if (old_conn_state->crtc != &crtc->base)
3196 			continue;
3197 
3198 		if (encoder->post_disable)
3199 			encoder->post_disable(state, encoder,
3200 					      old_crtc_state, old_conn_state);
3201 	}
3202 }
3203 
3204 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
3205 					    struct intel_crtc *crtc)
3206 {
3207 	const struct intel_crtc_state *old_crtc_state =
3208 		intel_atomic_get_old_crtc_state(state, crtc);
3209 	const struct drm_connector_state *old_conn_state;
3210 	struct drm_connector *conn;
3211 	int i;
3212 
3213 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3214 		struct intel_encoder *encoder =
3215 			to_intel_encoder(old_conn_state->best_encoder);
3216 
3217 		if (old_conn_state->crtc != &crtc->base)
3218 			continue;
3219 
3220 		if (encoder->post_pll_disable)
3221 			encoder->post_pll_disable(state, encoder,
3222 						  old_crtc_state, old_conn_state);
3223 	}
3224 }
3225 
3226 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
3227 				       struct intel_crtc *crtc)
3228 {
3229 	const struct intel_crtc_state *crtc_state =
3230 		intel_atomic_get_new_crtc_state(state, crtc);
3231 	const struct drm_connector_state *conn_state;
3232 	struct drm_connector *conn;
3233 	int i;
3234 
3235 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3236 		struct intel_encoder *encoder =
3237 			to_intel_encoder(conn_state->best_encoder);
3238 
3239 		if (conn_state->crtc != &crtc->base)
3240 			continue;
3241 
3242 		if (encoder->update_pipe)
3243 			encoder->update_pipe(state, encoder,
3244 					     crtc_state, conn_state);
3245 	}
3246 }
3247 
3248 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
3249 {
3250 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3251 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3252 
3253 	plane->disable_plane(plane, crtc_state);
3254 }
3255 
3256 static void ilk_crtc_enable(struct intel_atomic_state *state,
3257 			    struct intel_crtc *crtc)
3258 {
3259 	const struct intel_crtc_state *new_crtc_state =
3260 		intel_atomic_get_new_crtc_state(state, crtc);
3261 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3262 	enum pipe pipe = crtc->pipe;
3263 
3264 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3265 		return;
3266 
3267 	/*
3268 	 * Sometimes spurious CPU pipe underruns happen during FDI
3269 	 * training, at least with VGA+HDMI cloning. Suppress them.
3270 	 *
3271 	 * On ILK we get an occasional spurious CPU pipe underruns
3272 	 * between eDP port A enable and vdd enable. Also PCH port
3273 	 * enable seems to result in the occasional CPU pipe underrun.
3274 	 *
3275 	 * Spurious PCH underruns also occur during PCH enabling.
3276 	 */
3277 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3278 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3279 
3280 	if (new_crtc_state->has_pch_encoder)
3281 		intel_prepare_shared_dpll(new_crtc_state);
3282 
3283 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3284 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3285 
3286 	intel_set_transcoder_timings(new_crtc_state);
3287 	intel_set_pipe_src_size(new_crtc_state);
3288 
3289 	if (new_crtc_state->has_pch_encoder)
3290 		intel_cpu_transcoder_set_m_n(new_crtc_state,
3291 					     &new_crtc_state->fdi_m_n, NULL);
3292 
3293 	ilk_set_pipeconf(new_crtc_state);
3294 
3295 	crtc->active = true;
3296 
3297 	intel_encoders_pre_enable(state, crtc);
3298 
3299 	if (new_crtc_state->has_pch_encoder) {
3300 		/* Note: FDI PLL enabling _must_ be done before we enable the
3301 		 * cpu pipes, hence this is separate from all the other fdi/pch
3302 		 * enabling. */
3303 		ilk_fdi_pll_enable(new_crtc_state);
3304 	} else {
3305 		assert_fdi_tx_disabled(dev_priv, pipe);
3306 		assert_fdi_rx_disabled(dev_priv, pipe);
3307 	}
3308 
3309 	ilk_pfit_enable(new_crtc_state);
3310 
3311 	/*
3312 	 * On ILK+ LUT must be loaded before the pipe is running but with
3313 	 * clocks enabled
3314 	 */
3315 	intel_color_load_luts(new_crtc_state);
3316 	intel_color_commit(new_crtc_state);
3317 	/* update DSPCNTR to configure gamma for pipe bottom color */
3318 	intel_disable_primary_plane(new_crtc_state);
3319 
3320 	if (dev_priv->display.initial_watermarks)
3321 		dev_priv->display.initial_watermarks(state, crtc);
3322 	intel_enable_pipe(new_crtc_state);
3323 
3324 	if (new_crtc_state->has_pch_encoder)
3325 		ilk_pch_enable(state, new_crtc_state);
3326 
3327 	intel_crtc_vblank_on(new_crtc_state);
3328 
3329 	intel_encoders_enable(state, crtc);
3330 
3331 	if (HAS_PCH_CPT(dev_priv))
3332 		cpt_verify_modeset(dev_priv, pipe);
3333 
3334 	/*
3335 	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
3336 	 * And a second vblank wait is needed at least on ILK with
3337 	 * some interlaced HDMI modes. Let's do the double wait always
3338 	 * in case there are more corner cases we don't know about.
3339 	 */
3340 	if (new_crtc_state->has_pch_encoder) {
3341 		intel_wait_for_vblank(dev_priv, pipe);
3342 		intel_wait_for_vblank(dev_priv, pipe);
3343 	}
3344 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3345 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3346 }
3347 
3348 /* IPS only exists on ULT machines and is tied to pipe A. */
3349 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3350 {
3351 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
3352 }
3353 
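/*
 * Display WA #1180 helper: toggles the pipe scaler clock gating disable bits.
 * Callers apply the workaround before enabling the pipe and drop it again
 * after the first vblank; see hsw_crtc_enable() below.
 */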
3354 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3355 					    enum pipe pipe, bool apply)
3356 {
3357 	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3358 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3359 
3360 	if (apply)
3361 		val |= mask;
3362 	else
3363 		val &= ~mask;
3364 
3365 	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3366 }
3367 
3368 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
3369 {
3370 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3371 	enum pipe pipe = crtc->pipe;
3372 	u32 val;
3373 
3374 	val = MBUS_DBOX_A_CREDIT(2);
3375 
3376 	if (DISPLAY_VER(dev_priv) >= 12) {
3377 		val |= MBUS_DBOX_BW_CREDIT(2);
3378 		val |= MBUS_DBOX_B_CREDIT(12);
3379 	} else {
3380 		val |= MBUS_DBOX_BW_CREDIT(1);
3381 		val |= MBUS_DBOX_B_CREDIT(8);
3382 	}
3383 
3384 	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
3385 }
3386 
3387 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3388 {
3389 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3390 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3391 
3392 	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3393 		       HSW_LINETIME(crtc_state->linetime) |
3394 		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
3395 }
3396 
3397 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3398 {
3399 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3400 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3401 	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3402 	u32 val;
3403 
3404 	val = intel_de_read(dev_priv, reg);
3405 	val &= ~HSW_FRAME_START_DELAY_MASK;
3406 	val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3407 	intel_de_write(dev_priv, reg, val);
3408 }
3409 
3410 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
3411 					 const struct intel_crtc_state *crtc_state)
3412 {
3413 	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
3414 	struct drm_i915_private *dev_priv = to_i915(master->base.dev);
3415 	struct intel_crtc_state *master_crtc_state;
3416 	struct drm_connector_state *conn_state;
3417 	struct drm_connector *conn;
3418 	struct intel_encoder *encoder = NULL;
3419 	int i;
3420 
3421 	if (crtc_state->bigjoiner_slave)
3422 		master = crtc_state->bigjoiner_linked_crtc;
3423 
3424 	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
3425 
3426 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3427 		if (conn_state->crtc != &master->base)
3428 			continue;
3429 
3430 		encoder = to_intel_encoder(conn_state->best_encoder);
3431 		break;
3432 	}
3433 
3434 	if (!crtc_state->bigjoiner_slave) {
3435 		/* need to enable VDSC, which we skipped in pre-enable */
3436 		intel_dsc_enable(encoder, crtc_state);
3437 	} else {
3438 		/*
3439 		 * Enable sequence steps 1-7 on bigjoiner master
3440 		 */
3441 		intel_encoders_pre_pll_enable(state, master);
3442 		intel_enable_shared_dpll(master_crtc_state);
3443 		intel_encoders_pre_enable(state, master);
3444 
3445 		/* and DSC on slave */
3446 		intel_dsc_enable(NULL, crtc_state);
3447 	}
3448 
3449 	if (DISPLAY_VER(dev_priv) >= 13)
3450 		intel_uncompressed_joiner_enable(crtc_state);
3451 }
3452 
3453 static void hsw_crtc_enable(struct intel_atomic_state *state,
3454 			    struct intel_crtc *crtc)
3455 {
3456 	const struct intel_crtc_state *new_crtc_state =
3457 		intel_atomic_get_new_crtc_state(state, crtc);
3458 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3459 	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
3460 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3461 	bool psl_clkgate_wa;
3462 
3463 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3464 		return;
3465 
3466 	if (!new_crtc_state->bigjoiner) {
3467 		intel_encoders_pre_pll_enable(state, crtc);
3468 
3469 		if (new_crtc_state->shared_dpll)
3470 			intel_enable_shared_dpll(new_crtc_state);
3471 
3472 		intel_encoders_pre_enable(state, crtc);
3473 	} else {
3474 		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
3475 	}
3476 
3477 	intel_set_pipe_src_size(new_crtc_state);
3478 	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
3479 		bdw_set_pipemisc(new_crtc_state);
3480 
3481 	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
3482 		intel_set_transcoder_timings(new_crtc_state);
3483 
3484 		if (cpu_transcoder != TRANSCODER_EDP)
3485 			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
3486 				       new_crtc_state->pixel_multiplier - 1);
3487 
3488 		if (new_crtc_state->has_pch_encoder)
3489 			intel_cpu_transcoder_set_m_n(new_crtc_state,
3490 						     &new_crtc_state->fdi_m_n, NULL);
3491 
3492 		hsw_set_frame_start_delay(new_crtc_state);
3493 	}
3494 
3495 	if (!transcoder_is_dsi(cpu_transcoder))
3496 		hsw_set_pipeconf(new_crtc_state);
3497 
3498 	crtc->active = true;
3499 
3500 	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
3501 	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
3502 		new_crtc_state->pch_pfit.enabled;
3503 	if (psl_clkgate_wa)
3504 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
3505 
3506 	if (DISPLAY_VER(dev_priv) >= 9)
3507 		skl_pfit_enable(new_crtc_state);
3508 	else
3509 		ilk_pfit_enable(new_crtc_state);
3510 
	/*
	 * On ILK+ the LUT must be loaded before the pipe is running, but
	 * with clocks enabled
	 */
3515 	intel_color_load_luts(new_crtc_state);
3516 	intel_color_commit(new_crtc_state);
3517 	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
3518 	if (DISPLAY_VER(dev_priv) < 9)
3519 		intel_disable_primary_plane(new_crtc_state);
3520 
3521 	hsw_set_linetime_wm(new_crtc_state);
3522 
3523 	if (DISPLAY_VER(dev_priv) >= 11)
3524 		icl_set_pipe_chicken(crtc);
3525 
3526 	if (dev_priv->display.initial_watermarks)
3527 		dev_priv->display.initial_watermarks(state, crtc);
3528 
3529 	if (DISPLAY_VER(dev_priv) >= 11)
3530 		icl_pipe_mbus_enable(crtc);
3531 
3532 	if (new_crtc_state->bigjoiner_slave)
3533 		intel_crtc_vblank_on(new_crtc_state);
3534 
3535 	intel_encoders_enable(state, crtc);
3536 
3537 	if (psl_clkgate_wa) {
3538 		intel_wait_for_vblank(dev_priv, pipe);
3539 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
3540 	}
3541 
3542 	/* If we change the relative order between pipe/planes enabling, we need
3543 	 * to change the workaround. */
3544 	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
3545 	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
3546 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3547 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3548 	}
3549 }
3550 
3551 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3552 {
3553 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3554 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3555 	enum pipe pipe = crtc->pipe;
3556 
	/* To avoid upsetting the power well on Haswell, only disable the pfit
	 * if it's in use. The hw state code will make sure we get this right. */
3559 	if (!old_crtc_state->pch_pfit.enabled)
3560 		return;
3561 
3562 	intel_de_write(dev_priv, PF_CTL(pipe), 0);
3563 	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
3564 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
3565 }
3566 
3567 static void ilk_crtc_disable(struct intel_atomic_state *state,
3568 			     struct intel_crtc *crtc)
3569 {
3570 	const struct intel_crtc_state *old_crtc_state =
3571 		intel_atomic_get_old_crtc_state(state, crtc);
3572 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3573 	enum pipe pipe = crtc->pipe;
3574 
3575 	/*
3576 	 * Sometimes spurious CPU pipe underruns happen when the
3577 	 * pipe is already disabled, but FDI RX/TX is still enabled.
3578 	 * Happens at least with VGA+HDMI cloning. Suppress them.
3579 	 */
3580 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3581 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3582 
3583 	intel_encoders_disable(state, crtc);
3584 
3585 	intel_crtc_vblank_off(old_crtc_state);
3586 
3587 	intel_disable_pipe(old_crtc_state);
3588 
3589 	ilk_pfit_disable(old_crtc_state);
3590 
3591 	if (old_crtc_state->has_pch_encoder)
3592 		ilk_fdi_disable(crtc);
3593 
3594 	intel_encoders_post_disable(state, crtc);
3595 
3596 	if (old_crtc_state->has_pch_encoder) {
3597 		ilk_disable_pch_transcoder(dev_priv, pipe);
3598 
3599 		if (HAS_PCH_CPT(dev_priv)) {
3600 			i915_reg_t reg;
3601 			u32 temp;
3602 
3603 			/* disable TRANS_DP_CTL */
3604 			reg = TRANS_DP_CTL(pipe);
3605 			temp = intel_de_read(dev_priv, reg);
3606 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3607 				  TRANS_DP_PORT_SEL_MASK);
3608 			temp |= TRANS_DP_PORT_SEL_NONE;
3609 			intel_de_write(dev_priv, reg, temp);
3610 
3611 			/* disable DPLL_SEL */
3612 			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
3613 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
3614 			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
3615 		}
3616 
3617 		ilk_fdi_pll_disable(crtc);
3618 	}
3619 
3620 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3621 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3622 }
3623 
3624 static void hsw_crtc_disable(struct intel_atomic_state *state,
3625 			     struct intel_crtc *crtc)
3626 {
3627 	/*
3628 	 * FIXME collapse everything to one hook.
3629 	 * Need care with mst->ddi interactions.
3630 	 */
3631 	intel_encoders_disable(state, crtc);
3632 	intel_encoders_post_disable(state, crtc);
3633 }
3634 
3635 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
3636 {
3637 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3638 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3639 
3640 	if (!crtc_state->gmch_pfit.control)
3641 		return;
3642 
	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to the register description and the PRM.
	 */
3647 	drm_WARN_ON(&dev_priv->drm,
3648 		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
3649 	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
3650 
3651 	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
3652 		       crtc_state->gmch_pfit.pgm_ratios);
3653 	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
3654 
3655 	/* Border color in case we don't scale up to the full screen. Black by
3656 	 * default, change to something else for debugging. */
3657 	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
3658 }
3659 
3660 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
3661 {
3662 	if (phy == PHY_NONE)
3663 		return false;
3664 	else if (IS_ALDERLAKE_S(dev_priv))
3665 		return phy <= PHY_E;
3666 	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
3667 		return phy <= PHY_D;
3668 	else if (IS_JSL_EHL(dev_priv))
3669 		return phy <= PHY_C;
3670 	else if (DISPLAY_VER(dev_priv) >= 11)
3671 		return phy <= PHY_B;
3672 	else
3673 		return false;
3674 }
3675 
3676 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
3677 {
3678 	if (IS_TIGERLAKE(dev_priv))
3679 		return phy >= PHY_D && phy <= PHY_I;
3680 	else if (IS_ICELAKE(dev_priv))
3681 		return phy >= PHY_C && phy <= PHY_F;
3682 	else
3683 		return false;
3684 }
3685 
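/*
 * For example (derived from the mapping below): on ADL-S the TC ports start
 * at PHY_B (PORT_TC1 -> PHY_B), on DG1/RKL they start at PHY_C, and on most
 * other platforms ports map 1:1 starting at PHY_A.
 */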
3686 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
3687 {
3688 	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
3689 		return PHY_D + port - PORT_D_XELPD;
3690 	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
3691 		return PHY_F + port - PORT_TC1;
3692 	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
3693 		return PHY_B + port - PORT_TC1;
3694 	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
3695 		return PHY_C + port - PORT_TC1;
3696 	else if (IS_JSL_EHL(i915) && port == PORT_D)
3697 		return PHY_A;
3698 
3699 	return PHY_A + port - PORT_A;
3700 }
3701 
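/*
 * For example: on DISPLAY_VER >= 12 the first TC port is PORT_TC1, so
 * PORT_TC1 maps to TC_PORT_1; on ICL the first TC port is PORT_C instead.
 */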
3702 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
3703 {
3704 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
3705 		return TC_PORT_NONE;
3706 
3707 	if (DISPLAY_VER(dev_priv) >= 12)
3708 		return TC_PORT_1 + port - PORT_TC1;
3709 	else
3710 		return TC_PORT_1 + port - PORT_C;
3711 }
3712 
3713 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
3714 {
3715 	switch (port) {
3716 	case PORT_A:
3717 		return POWER_DOMAIN_PORT_DDI_A_LANES;
3718 	case PORT_B:
3719 		return POWER_DOMAIN_PORT_DDI_B_LANES;
3720 	case PORT_C:
3721 		return POWER_DOMAIN_PORT_DDI_C_LANES;
3722 	case PORT_D:
3723 		return POWER_DOMAIN_PORT_DDI_D_LANES;
3724 	case PORT_E:
3725 		return POWER_DOMAIN_PORT_DDI_E_LANES;
3726 	case PORT_F:
3727 		return POWER_DOMAIN_PORT_DDI_F_LANES;
3728 	case PORT_G:
3729 		return POWER_DOMAIN_PORT_DDI_G_LANES;
3730 	case PORT_H:
3731 		return POWER_DOMAIN_PORT_DDI_H_LANES;
3732 	case PORT_I:
3733 		return POWER_DOMAIN_PORT_DDI_I_LANES;
3734 	default:
3735 		MISSING_CASE(port);
3736 		return POWER_DOMAIN_PORT_OTHER;
3737 	}
3738 }
3739 
3740 enum intel_display_power_domain
3741 intel_aux_power_domain(struct intel_digital_port *dig_port)
3742 {
3743 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3744 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
3745 
3746 	if (intel_phy_is_tc(dev_priv, phy) &&
3747 	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
3748 		switch (dig_port->aux_ch) {
3749 		case AUX_CH_C:
3750 			return POWER_DOMAIN_AUX_C_TBT;
3751 		case AUX_CH_D:
3752 			return POWER_DOMAIN_AUX_D_TBT;
3753 		case AUX_CH_E:
3754 			return POWER_DOMAIN_AUX_E_TBT;
3755 		case AUX_CH_F:
3756 			return POWER_DOMAIN_AUX_F_TBT;
3757 		case AUX_CH_G:
3758 			return POWER_DOMAIN_AUX_G_TBT;
3759 		case AUX_CH_H:
3760 			return POWER_DOMAIN_AUX_H_TBT;
3761 		case AUX_CH_I:
3762 			return POWER_DOMAIN_AUX_I_TBT;
3763 		default:
3764 			MISSING_CASE(dig_port->aux_ch);
3765 			return POWER_DOMAIN_AUX_C_TBT;
3766 		}
3767 	}
3768 
3769 	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
3770 }
3771 
/*
 * Converts aux_ch to power_domain without caring about TBT ports;
 * for those, use intel_aux_power_domain().
 */
3776 enum intel_display_power_domain
3777 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
3778 {
3779 	switch (aux_ch) {
3780 	case AUX_CH_A:
3781 		return POWER_DOMAIN_AUX_A;
3782 	case AUX_CH_B:
3783 		return POWER_DOMAIN_AUX_B;
3784 	case AUX_CH_C:
3785 		return POWER_DOMAIN_AUX_C;
3786 	case AUX_CH_D:
3787 		return POWER_DOMAIN_AUX_D;
3788 	case AUX_CH_E:
3789 		return POWER_DOMAIN_AUX_E;
3790 	case AUX_CH_F:
3791 		return POWER_DOMAIN_AUX_F;
3792 	case AUX_CH_G:
3793 		return POWER_DOMAIN_AUX_G;
3794 	case AUX_CH_H:
3795 		return POWER_DOMAIN_AUX_H;
3796 	case AUX_CH_I:
3797 		return POWER_DOMAIN_AUX_I;
3798 	default:
3799 		MISSING_CASE(aux_ch);
3800 		return POWER_DOMAIN_AUX_A;
3801 	}
3802 }
3803 
3804 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3805 {
3806 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3807 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3808 	struct drm_encoder *encoder;
3809 	enum pipe pipe = crtc->pipe;
3810 	u64 mask;
3811 	enum transcoder transcoder = crtc_state->cpu_transcoder;
3812 
3813 	if (!crtc_state->hw.active)
3814 		return 0;
3815 
3816 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
3817 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
3818 	if (crtc_state->pch_pfit.enabled ||
3819 	    crtc_state->pch_pfit.force_thru)
3820 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
3821 
3822 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
3823 				  crtc_state->uapi.encoder_mask) {
3824 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3825 
3826 		mask |= BIT_ULL(intel_encoder->power_domain);
3827 	}
3828 
3829 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
3830 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
3831 
3832 	if (crtc_state->shared_dpll)
3833 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
3834 
3835 	if (crtc_state->dsc.compression_enable)
3836 		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
3837 
3838 	return mask;
3839 }
3840 
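/*
 * Grab references for any power domains the CRTC newly needs; domains that
 * are no longer needed are returned as a mask for the caller to release
 * (via modeset_put_crtc_power_domains()) once the modeset has completed.
 */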
3841 static u64
3842 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3843 {
3844 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3845 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3846 	enum intel_display_power_domain domain;
3847 	u64 domains, new_domains, old_domains;
3848 
3849 	domains = get_crtc_power_domains(crtc_state);
3850 
3851 	new_domains = domains & ~crtc->enabled_power_domains.mask;
3852 	old_domains = crtc->enabled_power_domains.mask & ~domains;
3853 
3854 	for_each_power_domain(domain, new_domains)
3855 		intel_display_power_get_in_set(dev_priv,
3856 					       &crtc->enabled_power_domains,
3857 					       domain);
3858 
3859 	return old_domains;
3860 }
3861 
3862 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
3863 					   u64 domains)
3864 {
3865 	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
3866 					    &crtc->enabled_power_domains,
3867 					    domains);
3868 }
3869 
3870 static void valleyview_crtc_enable(struct intel_atomic_state *state,
3871 				   struct intel_crtc *crtc)
3872 {
3873 	const struct intel_crtc_state *new_crtc_state =
3874 		intel_atomic_get_new_crtc_state(state, crtc);
3875 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3876 	enum pipe pipe = crtc->pipe;
3877 
3878 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3879 		return;
3880 
3881 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3882 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3883 
3884 	intel_set_transcoder_timings(new_crtc_state);
3885 	intel_set_pipe_src_size(new_crtc_state);
3886 
3887 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
3888 		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
3889 		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
3890 	}
3891 
3892 	i9xx_set_pipeconf(new_crtc_state);
3893 
3894 	crtc->active = true;
3895 
3896 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3897 
3898 	intel_encoders_pre_pll_enable(state, crtc);
3899 
3900 	if (IS_CHERRYVIEW(dev_priv)) {
3901 		chv_prepare_pll(crtc, new_crtc_state);
3902 		chv_enable_pll(crtc, new_crtc_state);
3903 	} else {
3904 		vlv_prepare_pll(crtc, new_crtc_state);
3905 		vlv_enable_pll(crtc, new_crtc_state);
3906 	}
3907 
3908 	intel_encoders_pre_enable(state, crtc);
3909 
3910 	i9xx_pfit_enable(new_crtc_state);
3911 
3912 	intel_color_load_luts(new_crtc_state);
3913 	intel_color_commit(new_crtc_state);
3914 	/* update DSPCNTR to configure gamma for pipe bottom color */
3915 	intel_disable_primary_plane(new_crtc_state);
3916 
3917 	dev_priv->display.initial_watermarks(state, crtc);
3918 	intel_enable_pipe(new_crtc_state);
3919 
3920 	intel_crtc_vblank_on(new_crtc_state);
3921 
3922 	intel_encoders_enable(state, crtc);
3923 }
3924 
3925 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
3926 {
3927 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3928 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3929 
3930 	intel_de_write(dev_priv, FP0(crtc->pipe),
3931 		       crtc_state->dpll_hw_state.fp0);
3932 	intel_de_write(dev_priv, FP1(crtc->pipe),
3933 		       crtc_state->dpll_hw_state.fp1);
3934 }
3935 
3936 static void i9xx_crtc_enable(struct intel_atomic_state *state,
3937 			     struct intel_crtc *crtc)
3938 {
3939 	const struct intel_crtc_state *new_crtc_state =
3940 		intel_atomic_get_new_crtc_state(state, crtc);
3941 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3942 	enum pipe pipe = crtc->pipe;
3943 
3944 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3945 		return;
3946 
3947 	i9xx_set_pll_dividers(new_crtc_state);
3948 
3949 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3950 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3951 
3952 	intel_set_transcoder_timings(new_crtc_state);
3953 	intel_set_pipe_src_size(new_crtc_state);
3954 
3955 	i9xx_set_pipeconf(new_crtc_state);
3956 
3957 	crtc->active = true;
3958 
3959 	if (DISPLAY_VER(dev_priv) != 2)
3960 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3961 
3962 	intel_encoders_pre_enable(state, crtc);
3963 
3964 	i9xx_enable_pll(crtc, new_crtc_state);
3965 
3966 	i9xx_pfit_enable(new_crtc_state);
3967 
3968 	intel_color_load_luts(new_crtc_state);
3969 	intel_color_commit(new_crtc_state);
3970 	/* update DSPCNTR to configure gamma for pipe bottom color */
3971 	intel_disable_primary_plane(new_crtc_state);
3972 
3973 	if (dev_priv->display.initial_watermarks)
3974 		dev_priv->display.initial_watermarks(state, crtc);
3975 	else
3976 		intel_update_watermarks(crtc);
3977 	intel_enable_pipe(new_crtc_state);
3978 
3979 	intel_crtc_vblank_on(new_crtc_state);
3980 
3981 	intel_encoders_enable(state, crtc);
3982 
3983 	/* prevents spurious underruns */
3984 	if (DISPLAY_VER(dev_priv) == 2)
3985 		intel_wait_for_vblank(dev_priv, pipe);
3986 }
3987 
3988 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3989 {
3990 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3991 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3992 
3993 	if (!old_crtc_state->gmch_pfit.control)
3994 		return;
3995 
3996 	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
3997 
3998 	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
3999 		    intel_de_read(dev_priv, PFIT_CONTROL));
4000 	intel_de_write(dev_priv, PFIT_CONTROL, 0);
4001 }
4002 
4003 static void i9xx_crtc_disable(struct intel_atomic_state *state,
4004 			      struct intel_crtc *crtc)
4005 {
4006 	struct intel_crtc_state *old_crtc_state =
4007 		intel_atomic_get_old_crtc_state(state, crtc);
4008 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4009 	enum pipe pipe = crtc->pipe;
4010 
4011 	/*
4012 	 * On gen2 planes are double buffered but the pipe isn't, so we must
4013 	 * wait for planes to fully turn off before disabling the pipe.
4014 	 */
4015 	if (DISPLAY_VER(dev_priv) == 2)
4016 		intel_wait_for_vblank(dev_priv, pipe);
4017 
4018 	intel_encoders_disable(state, crtc);
4019 
4020 	intel_crtc_vblank_off(old_crtc_state);
4021 
4022 	intel_disable_pipe(old_crtc_state);
4023 
4024 	i9xx_pfit_disable(old_crtc_state);
4025 
4026 	intel_encoders_post_disable(state, crtc);
4027 
4028 	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
4029 		if (IS_CHERRYVIEW(dev_priv))
4030 			chv_disable_pll(dev_priv, pipe);
4031 		else if (IS_VALLEYVIEW(dev_priv))
4032 			vlv_disable_pll(dev_priv, pipe);
4033 		else
4034 			i9xx_disable_pll(old_crtc_state);
4035 	}
4036 
4037 	intel_encoders_post_pll_disable(state, crtc);
4038 
4039 	if (DISPLAY_VER(dev_priv) != 2)
4040 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4041 
4042 	if (!dev_priv->display.initial_watermarks)
4043 		intel_update_watermarks(crtc);
4044 
4045 	/* clock the pipe down to 640x480@60 to potentially save power */
4046 	if (IS_I830(dev_priv))
4047 		i830_enable_pipe(dev_priv, pipe);
4048 }
4049 
4050 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
4051 					struct drm_modeset_acquire_ctx *ctx)
4052 {
4053 	struct intel_encoder *encoder;
4054 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4055 	struct intel_bw_state *bw_state =
4056 		to_intel_bw_state(dev_priv->bw_obj.state);
4057 	struct intel_cdclk_state *cdclk_state =
4058 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
4059 	struct intel_dbuf_state *dbuf_state =
4060 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
4061 	struct intel_crtc_state *crtc_state =
4062 		to_intel_crtc_state(crtc->base.state);
4063 	struct intel_plane *plane;
4064 	struct drm_atomic_state *state;
4065 	struct intel_crtc_state *temp_crtc_state;
4066 	enum pipe pipe = crtc->pipe;
4067 	int ret;
4068 
4069 	if (!crtc_state->hw.active)
4070 		return;
4071 
4072 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
4073 		const struct intel_plane_state *plane_state =
4074 			to_intel_plane_state(plane->base.state);
4075 
4076 		if (plane_state->uapi.visible)
4077 			intel_plane_disable_noatomic(crtc, plane);
4078 	}
4079 
4080 	state = drm_atomic_state_alloc(&dev_priv->drm);
4081 	if (!state) {
4082 		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory\n",
4084 			    crtc->base.base.id, crtc->base.name);
4085 		return;
4086 	}
4087 
4088 	state->acquire_ctx = ctx;
4089 
4090 	/* Everything's already locked, -EDEADLK can't happen. */
4091 	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
4092 	ret = drm_atomic_add_affected_connectors(state, &crtc->base);
4093 
4094 	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
4095 
4096 	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
4097 
4098 	drm_atomic_state_put(state);
4099 
4100 	drm_dbg_kms(&dev_priv->drm,
4101 		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
4102 		    crtc->base.base.id, crtc->base.name);
4103 
4104 	crtc->active = false;
4105 	crtc->base.enabled = false;
4106 
4107 	drm_WARN_ON(&dev_priv->drm,
4108 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
4109 	crtc_state->uapi.active = false;
4110 	crtc_state->uapi.connector_mask = 0;
4111 	crtc_state->uapi.encoder_mask = 0;
4112 	intel_crtc_free_hw_state(crtc_state);
4113 	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
4114 
4115 	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
4116 		encoder->base.crtc = NULL;
4117 
4118 	intel_fbc_disable(crtc);
4119 	intel_update_watermarks(crtc);
4120 	intel_disable_shared_dpll(crtc_state);
4121 
4122 	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
4123 
4124 	dev_priv->active_pipes &= ~BIT(pipe);
4125 	cdclk_state->min_cdclk[pipe] = 0;
4126 	cdclk_state->min_voltage_level[pipe] = 0;
4127 	cdclk_state->active_pipes &= ~BIT(pipe);
4128 
4129 	dbuf_state->active_pipes &= ~BIT(pipe);
4130 
4131 	bw_state->data_rate[pipe] = 0;
4132 	bw_state->num_active_planes[pipe] = 0;
4133 }
4134 
/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
4139 int intel_display_suspend(struct drm_device *dev)
4140 {
4141 	struct drm_i915_private *dev_priv = to_i915(dev);
4142 	struct drm_atomic_state *state;
4143 	int ret;
4144 
4145 	if (!HAS_DISPLAY(dev_priv))
4146 		return 0;
4147 
4148 	state = drm_atomic_helper_suspend(dev);
4149 	ret = PTR_ERR_OR_ZERO(state);
4150 	if (ret)
4151 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
4152 			ret);
4153 	else
4154 		dev_priv->modeset_restore_state = state;
4155 	return ret;
4156 }
4157 
4158 void intel_encoder_destroy(struct drm_encoder *encoder)
4159 {
4160 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4161 
4162 	drm_encoder_cleanup(encoder);
4163 	kfree(intel_encoder);
4164 }
4165 
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
4168 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
4169 					 struct drm_connector_state *conn_state)
4170 {
4171 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
4172 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
4173 
4174 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
4175 		    connector->base.base.id, connector->base.name);
4176 
4177 	if (connector->get_hw_state(connector)) {
4178 		struct intel_encoder *encoder = intel_attached_encoder(connector);
4179 
4180 		I915_STATE_WARN(!crtc_state,
4181 			 "connector enabled without attached crtc\n");
4182 
4183 		if (!crtc_state)
4184 			return;
4185 
4186 		I915_STATE_WARN(!crtc_state->hw.active,
4187 				"connector is active, but attached crtc isn't\n");
4188 
4189 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
4190 			return;
4191 
4192 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
4193 			"atomic encoder doesn't match attached encoder\n");
4194 
4195 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
4196 			"attached encoder crtc differs from connector crtc\n");
4197 	} else {
4198 		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
4199 				"attached crtc is active, but connector isn't\n");
4200 		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
4201 			"best encoder set without crtc!\n");
4202 	}
4203 }
4204 
4205 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
4206 {
4207 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4208 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4209 
4210 	/* IPS only exists on ULT machines and is tied to pipe A. */
4211 	if (!hsw_crtc_supports_ips(crtc))
4212 		return false;
4213 
4214 	if (!dev_priv->params.enable_ips)
4215 		return false;
4216 
4217 	if (crtc_state->pipe_bpp > 24)
4218 		return false;
4219 
	/*
	 * We compare against the max cdclk, which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS would be
	 * preferable.
	 */
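	/*
	 * Worked example (hypothetical numbers): with max_cdclk_freq =
	 * 540000 kHz, IPS on BDW is only allowed while the pixel rate
	 * stays at or below 540000 * 95 / 100 = 513000 kHz.
	 */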
4227 	if (IS_BROADWELL(dev_priv) &&
4228 	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
4229 		return false;
4230 
4231 	return true;
4232 }
4233 
4234 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
4235 {
4236 	struct drm_i915_private *dev_priv =
4237 		to_i915(crtc_state->uapi.crtc->dev);
4238 	struct intel_atomic_state *state =
4239 		to_intel_atomic_state(crtc_state->uapi.state);
4240 
4241 	crtc_state->ips_enabled = false;
4242 
4243 	if (!hsw_crtc_state_ips_capable(crtc_state))
4244 		return 0;
4245 
4246 	/*
4247 	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
4248 	 * enabled and disabled dynamically based on package C states,
4249 	 * user space can't make reliable use of the CRCs, so let's just
4250 	 * completely disable it.
4251 	 */
4252 	if (crtc_state->crc_enabled)
4253 		return 0;
4254 
4255 	/* IPS should be fine as long as at least one plane is enabled. */
4256 	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
4257 		return 0;
4258 
4259 	if (IS_BROADWELL(dev_priv)) {
4260 		const struct intel_cdclk_state *cdclk_state;
4261 
4262 		cdclk_state = intel_atomic_get_cdclk_state(state);
4263 		if (IS_ERR(cdclk_state))
4264 			return PTR_ERR(cdclk_state);
4265 
4266 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
4267 		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
4268 			return 0;
4269 	}
4270 
4271 	crtc_state->ips_enabled = true;
4272 
4273 	return 0;
4274 }
4275 
4276 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
4277 {
4278 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4279 
4280 	/* GDG double wide on either pipe, otherwise pipe A only */
4281 	return DISPLAY_VER(dev_priv) < 4 &&
4282 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
4283 }
4284 
4285 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
4286 {
4287 	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
4288 	struct drm_rect src;
4289 
4290 	/*
4291 	 * We only use IF-ID interlacing. If we ever use
4292 	 * PF-ID we'll need to adjust the pixel_rate here.
4293 	 */
4294 
4295 	if (!crtc_state->pch_pfit.enabled)
4296 		return pixel_rate;
4297 
4298 	drm_rect_init(&src, 0, 0,
4299 		      crtc_state->pipe_src_w << 16,
4300 		      crtc_state->pipe_src_h << 16);
4301 
4302 	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
4303 				   pixel_rate);
4304 }
4305 
4306 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
4307 					 const struct drm_display_mode *timings)
4308 {
4309 	mode->hdisplay = timings->crtc_hdisplay;
4310 	mode->htotal = timings->crtc_htotal;
4311 	mode->hsync_start = timings->crtc_hsync_start;
4312 	mode->hsync_end = timings->crtc_hsync_end;
4313 
4314 	mode->vdisplay = timings->crtc_vdisplay;
4315 	mode->vtotal = timings->crtc_vtotal;
4316 	mode->vsync_start = timings->crtc_vsync_start;
4317 	mode->vsync_end = timings->crtc_vsync_end;
4318 
4319 	mode->flags = timings->flags;
4320 	mode->type = DRM_MODE_TYPE_DRIVER;
4321 
4322 	mode->clock = timings->crtc_clock;
4323 
4324 	drm_mode_set_name(mode);
4325 }
4326 
4327 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
4328 {
4329 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4330 
4331 	if (HAS_GMCH(dev_priv))
4332 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
4333 		crtc_state->pixel_rate =
4334 			crtc_state->hw.pipe_mode.crtc_clock;
4335 	else
4336 		crtc_state->pixel_rate =
4337 			ilk_pipe_pixel_rate(crtc_state);
4338 }
4339 
4340 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
4341 {
4342 	struct drm_display_mode *mode = &crtc_state->hw.mode;
4343 	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
4344 	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4345 
4346 	drm_mode_copy(pipe_mode, adjusted_mode);
4347 
4348 	if (crtc_state->bigjoiner) {
4349 		/*
4350 		 * transcoder is programmed to the full mode,
4351 		 * but pipe timings are half of the transcoder mode
4352 		 */
4353 		pipe_mode->crtc_hdisplay /= 2;
4354 		pipe_mode->crtc_hblank_start /= 2;
4355 		pipe_mode->crtc_hblank_end /= 2;
4356 		pipe_mode->crtc_hsync_start /= 2;
4357 		pipe_mode->crtc_hsync_end /= 2;
4358 		pipe_mode->crtc_htotal /= 2;
4359 		pipe_mode->crtc_clock /= 2;
4360 	}
4361 
4362 	if (crtc_state->splitter.enable) {
4363 		int n = crtc_state->splitter.link_count;
4364 		int overlap = crtc_state->splitter.pixel_overlap;
4365 
4366 		/*
4367 		 * eDP MSO uses segment timings from EDID for transcoder
4368 		 * timings, but full mode for everything else.
4369 		 *
4370 		 * h_full = (h_segment - pixel_overlap) * link_count
4371 		 */
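		/*
		 * Worked example (hypothetical numbers): with link_count = 2
		 * and pixel_overlap = 0, a 1920 pixel wide segment expands
		 * to a 3840 pixel wide full mode.
		 */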
4372 		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4373 		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4374 		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4375 		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4376 		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4377 		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4378 		pipe_mode->crtc_clock *= n;
4379 
4380 		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4381 		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
4382 	} else {
4383 		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4384 		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
4385 	}
4386 
4387 	intel_crtc_compute_pixel_rate(crtc_state);
4388 
4389 	drm_mode_copy(mode, adjusted_mode);
4390 	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
4391 	mode->vdisplay = crtc_state->pipe_src_h;
4392 }
4393 
4394 static void intel_encoder_get_config(struct intel_encoder *encoder,
4395 				     struct intel_crtc_state *crtc_state)
4396 {
4397 	encoder->get_config(encoder, crtc_state);
4398 
4399 	intel_crtc_readout_derived_state(crtc_state);
4400 }
4401 
4402 static int intel_crtc_compute_config(struct intel_crtc *crtc,
4403 				     struct intel_crtc_state *pipe_config)
4404 {
4405 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4406 	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
4407 	int clock_limit = dev_priv->max_dotclk_freq;
4408 
4409 	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
4410 
4411 	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
4412 	if (pipe_config->bigjoiner) {
4413 		pipe_mode->crtc_clock /= 2;
4414 		pipe_mode->crtc_hdisplay /= 2;
4415 		pipe_mode->crtc_hblank_start /= 2;
4416 		pipe_mode->crtc_hblank_end /= 2;
4417 		pipe_mode->crtc_hsync_start /= 2;
4418 		pipe_mode->crtc_hsync_end /= 2;
4419 		pipe_mode->crtc_htotal /= 2;
4420 		pipe_config->pipe_src_w /= 2;
4421 	}
4422 
4423 	if (pipe_config->splitter.enable) {
4424 		int n = pipe_config->splitter.link_count;
4425 		int overlap = pipe_config->splitter.pixel_overlap;
4426 
4427 		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4428 		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4429 		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4430 		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4431 		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4432 		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4433 		pipe_mode->crtc_clock *= n;
4434 	}
4435 
4436 	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4437 
4438 	if (DISPLAY_VER(dev_priv) < 4) {
4439 		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
4440 
4441 		/*
4442 		 * Enable double wide mode when the dot clock
4443 		 * is > 90% of the (display) core speed.
4444 		 */
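		/*
		 * Worked example (hypothetical numbers): with a 320000 kHz
		 * max cdclk the single wide limit is 320000 * 9 / 10 =
		 * 288000 kHz; any faster dot clock requires double wide mode.
		 */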
4445 		if (intel_crtc_supports_double_wide(crtc) &&
4446 		    pipe_mode->crtc_clock > clock_limit) {
4447 			clock_limit = dev_priv->max_dotclk_freq;
4448 			pipe_config->double_wide = true;
4449 		}
4450 	}
4451 
4452 	if (pipe_mode->crtc_clock > clock_limit) {
4453 		drm_dbg_kms(&dev_priv->drm,
4454 			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
4455 			    pipe_mode->crtc_clock, clock_limit,
4456 			    yesno(pipe_config->double_wide));
4457 		return -EINVAL;
4458 	}
4459 
4460 	/*
4461 	 * Pipe horizontal size must be even in:
4462 	 * - DVO ganged mode
4463 	 * - LVDS dual channel mode
4464 	 * - Double wide pipe
4465 	 */
4466 	if (pipe_config->pipe_src_w & 1) {
4467 		if (pipe_config->double_wide) {
4468 			drm_dbg_kms(&dev_priv->drm,
4469 				    "Odd pipe source width not supported with double wide pipe\n");
4470 			return -EINVAL;
4471 		}
4472 
4473 		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
4474 		    intel_is_dual_link_lvds(dev_priv)) {
4475 			drm_dbg_kms(&dev_priv->drm,
4476 				    "Odd pipe source width not supported with dual link LVDS\n");
4477 			return -EINVAL;
4478 		}
4479 	}
4480 
	/* Cantiga+ cannot handle modes with an hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
4484 	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
4485 	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
4486 		return -EINVAL;
4487 
4488 	intel_crtc_compute_pixel_rate(pipe_config);
4489 
4490 	if (pipe_config->has_pch_encoder)
4491 		return ilk_fdi_compute_config(crtc, pipe_config);
4492 
4493 	return 0;
4494 }
4495 
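/*
 * Both M and N must fit in their register fields (DATA_LINK_M_N_MASK);
 * halving them together until they do preserves the M/N ratio.
 */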
4496 static void
4497 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4498 {
4499 	while (*num > DATA_LINK_M_N_MASK ||
4500 	       *den > DATA_LINK_M_N_MASK) {
4501 		*num >>= 1;
4502 		*den >>= 1;
4503 	}
4504 }
4505 
4506 static void compute_m_n(unsigned int m, unsigned int n,
4507 			u32 *ret_m, u32 *ret_n,
4508 			bool constant_n)
4509 {
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Set N to 0x8000, which should be
	 * acceptable to such devices: 0x8000 is the specified fixed N
	 * value for asynchronous clock mode, which the devices also
	 * expect in synchronous clock mode.
	 */
4517 	if (constant_n)
4518 		*ret_n = DP_LINK_CONSTANT_N_VALUE;
4519 	else
4520 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4521 
4522 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4523 	intel_reduce_m_n_ratio(ret_m, ret_n);
4524 }
4525 
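/*
 * Worked example (hypothetical numbers): 1080p60 (pixel_clock = 148500 kHz,
 * 24 bpp) over 4 lanes of HBR (link_clock = 270000 kHz) with constant_n:
 *
 *   data M/N: M = 24 * 148500 * 0x8000 / (270000 * 4 * 8) = 13516, N = 0x8000
 *   link M/N: M = 148500 * 0x8000 / 270000 = 18022, N = 0x8000
 */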
4526 void
4527 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
4528 		       int pixel_clock, int link_clock,
4529 		       struct intel_link_m_n *m_n,
4530 		       bool constant_n, bool fec_enable)
4531 {
4532 	u32 data_clock = bits_per_pixel * pixel_clock;
4533 
4534 	if (fec_enable)
4535 		data_clock = intel_dp_mode_to_fec_clock(data_clock);
4536 
4537 	m_n->tu = 64;
4538 	compute_m_n(data_clock,
4539 		    link_clock * nlanes * 8,
4540 		    &m_n->gmch_m, &m_n->gmch_n,
4541 		    constant_n);
4542 
4543 	compute_m_n(pixel_clock, link_clock,
4544 		    &m_n->link_m, &m_n->link_n,
4545 		    constant_n);
4546 }
4547 
4548 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
4549 {
4550 	/*
4551 	 * There may be no VBT; and if the BIOS enabled SSC we can
4552 	 * just keep using it to avoid unnecessary flicker.  Whereas if the
4553 	 * BIOS isn't using it, don't assume it will work even if the VBT
4554 	 * indicates as much.
4555 	 */
4556 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
4557 		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
4558 						       PCH_DREF_CONTROL) &
4559 			DREF_SSC1_ENABLE;
4560 
4561 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
4562 			drm_dbg_kms(&dev_priv->drm,
4563 				    "SSC %s by BIOS, overriding VBT which says %s\n",
4564 				    enableddisabled(bios_lvds_use_ssc),
4565 				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
4566 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
4567 		}
4568 	}
4569 }
4570 
4571 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4572 					 const struct intel_link_m_n *m_n)
4573 {
4574 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4575 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4576 	enum pipe pipe = crtc->pipe;
4577 
4578 	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
4579 		       TU_SIZE(m_n->tu) | m_n->gmch_m);
4580 	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4581 	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4582 	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4583 }
4584 
4585 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
4586 				 enum transcoder transcoder)
4587 {
4588 	if (IS_HASWELL(dev_priv))
4589 		return transcoder == TRANSCODER_EDP;
4590 
4591 	/*
4592 	 * Strictly speaking some registers are available before
4593 	 * gen7, but we only support DRRS on gen7+
4594 	 */
4595 	return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
4596 }
4597 
4598 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4599 					 const struct intel_link_m_n *m_n,
4600 					 const struct intel_link_m_n *m2_n2)
4601 {
4602 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4603 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4604 	enum pipe pipe = crtc->pipe;
4605 	enum transcoder transcoder = crtc_state->cpu_transcoder;
4606 
4607 	if (DISPLAY_VER(dev_priv) >= 5) {
4608 		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
4609 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4610 		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
4611 			       m_n->gmch_n);
4612 		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
4613 			       m_n->link_m);
4614 		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
4615 			       m_n->link_n);
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
4620 		if (m2_n2 && crtc_state->has_drrs &&
4621 		    transcoder_has_m2_n2(dev_priv, transcoder)) {
4622 			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
4623 				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
4624 			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
4625 				       m2_n2->gmch_n);
4626 			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
4627 				       m2_n2->link_m);
4628 			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
4629 				       m2_n2->link_n);
4630 		}
4631 	} else {
4632 		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
4633 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4634 		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4635 		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
4636 		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
4637 	}
4638 }
4639 
4640 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
4641 {
4642 	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
4643 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4644 
4645 	if (m_n == M1_N1) {
4646 		dp_m_n = &crtc_state->dp_m_n;
4647 		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {
		/*
		 * M2_N2 registers are not supported, so the m2_n2 divider
		 * value needs to be programmed into M1_N1 instead.
		 */
4654 		dp_m_n = &crtc_state->dp_m2_n2;
4655 	} else {
4656 		drm_err(&i915->drm, "Unsupported divider value\n");
4657 		return;
4658 	}
4659 
4660 	if (crtc_state->has_pch_encoder)
4661 		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
4662 	else
4663 		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
4664 }
4665 
4666 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
4667 {
4668 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4669 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4670 	enum pipe pipe = crtc->pipe;
4671 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4672 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4673 	u32 crtc_vtotal, crtc_vblank_end;
4674 	int vsyncshift = 0;
4675 
	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
4678 	crtc_vtotal = adjusted_mode->crtc_vtotal;
4679 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
4680 
4681 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4682 		/* the chip adds 2 halflines automatically */
4683 		crtc_vtotal -= 1;
4684 		crtc_vblank_end -= 1;
4685 
4686 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4687 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
4688 		else
4689 			vsyncshift = adjusted_mode->crtc_hsync_start -
4690 				adjusted_mode->crtc_htotal / 2;
4691 		if (vsyncshift < 0)
4692 			vsyncshift += adjusted_mode->crtc_htotal;
4693 	}
4694 
4695 	if (DISPLAY_VER(dev_priv) > 3)
4696 		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
4697 		               vsyncshift);
4698 
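	/*
	 * Each timing register packs "active/start - 1" in the low 16 bits
	 * and "total/end - 1" in the high 16 bits. For example (hypothetical
	 * mode): crtc_hdisplay = 1920 and crtc_htotal = 2200 yield HTOTAL =
	 * (2199 << 16) | 1919 = 0x0897077f.
	 */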
4699 	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
4700 		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
4701 	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
4702 		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
4703 	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
4704 		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
4705 
4706 	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
4707 		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
4708 	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
4709 		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
4710 	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
4711 		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
4712 
4713 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4714 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4715 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4716 	 * bits. */
4717 	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
4718 	    (pipe == PIPE_B || pipe == PIPE_C))
4719 		intel_de_write(dev_priv, VTOTAL(pipe),
		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
}
4723 
4724 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
4725 {
4726 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4727 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4728 	enum pipe pipe = crtc->pipe;
4729 
4730 	/* pipesrc controls the size that is scaled from, which should
4731 	 * always be the user's requested size.
4732 	 */
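	/* e.g. (hypothetical): 1920x1080 -> (1919 << 16) | 1079 = 0x077f0437 */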
4733 	intel_de_write(dev_priv, PIPESRC(pipe),
4734 		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
4735 }
4736 
4737 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
4738 {
4739 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4740 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4741 
4742 	if (DISPLAY_VER(dev_priv) == 2)
4743 		return false;
4744 
4745 	if (DISPLAY_VER(dev_priv) >= 9 ||
4746 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4747 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
4748 	else
4749 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
4750 }
4751 
4752 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
4753 					 struct intel_crtc_state *pipe_config)
4754 {
4755 	struct drm_device *dev = crtc->base.dev;
4756 	struct drm_i915_private *dev_priv = to_i915(dev);
4757 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
4758 	u32 tmp;
4759 
4760 	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
4761 	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
4762 	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
4763 
4764 	if (!transcoder_is_dsi(cpu_transcoder)) {
4765 		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
4766 		pipe_config->hw.adjusted_mode.crtc_hblank_start =
4767 							(tmp & 0xffff) + 1;
4768 		pipe_config->hw.adjusted_mode.crtc_hblank_end =
4769 						((tmp >> 16) & 0xffff) + 1;
4770 	}
4771 	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
4772 	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
4773 	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
4774 
4775 	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
4776 	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
4777 	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
4778 
4779 	if (!transcoder_is_dsi(cpu_transcoder)) {
4780 		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
4781 		pipe_config->hw.adjusted_mode.crtc_vblank_start =
4782 							(tmp & 0xffff) + 1;
4783 		pipe_config->hw.adjusted_mode.crtc_vblank_end =
4784 						((tmp >> 16) & 0xffff) + 1;
4785 	}
4786 	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
4787 	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
4788 	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
4789 
4790 	if (intel_pipe_is_interlaced(pipe_config)) {
4791 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
4792 		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
4793 		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
4794 	}
4795 }
4796 
4797 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
4798 				    struct intel_crtc_state *pipe_config)
4799 {
4800 	struct drm_device *dev = crtc->base.dev;
4801 	struct drm_i915_private *dev_priv = to_i915(dev);
4802 	u32 tmp;
4803 
4804 	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
4805 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4806 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
4807 }
4808 
4809 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
4810 {
4811 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4812 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4813 	u32 pipeconf;
4814 
4815 	pipeconf = 0;
4816 
4817 	/* we keep both pipes enabled on 830 */
4818 	if (IS_I830(dev_priv))
4819 		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
4820 
4821 	if (crtc_state->double_wide)
4822 		pipeconf |= PIPECONF_DOUBLE_WIDE;
4823 
4824 	/* only g4x and later have fancy bpc/dither controls */
4825 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4826 	    IS_CHERRYVIEW(dev_priv)) {
4827 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
4828 		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
4829 			pipeconf |= PIPECONF_DITHER_EN |
4830 				    PIPECONF_DITHER_TYPE_SP;
4831 
4832 		switch (crtc_state->pipe_bpp) {
4833 		case 18:
4834 			pipeconf |= PIPECONF_6BPC;
4835 			break;
4836 		case 24:
4837 			pipeconf |= PIPECONF_8BPC;
4838 			break;
4839 		case 30:
4840 			pipeconf |= PIPECONF_10BPC;
4841 			break;
4842 		default:
4843 			/* Case prevented by intel_choose_pipe_bpp_dither. */
4844 			BUG();
4845 		}
4846 	}
4847 
4848 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
4849 		if (DISPLAY_VER(dev_priv) < 4 ||
4850 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4851 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4852 		else
4853 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
4854 	} else {
4855 		pipeconf |= PIPECONF_PROGRESSIVE;
4856 	}
4857 
4858 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
4859 	     crtc_state->limited_color_range)
4860 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
4861 
4862 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
4863 
4864 	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
4865 
4866 	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
4867 	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
4868 }
4869 
4870 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
4871 {
4872 	if (IS_I830(dev_priv))
4873 		return false;
4874 
4875 	return DISPLAY_VER(dev_priv) >= 4 ||
4876 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
4877 }
4878 
4879 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
4880 {
4881 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4882 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4883 	u32 tmp;
4884 
4885 	if (!i9xx_has_pfit(dev_priv))
4886 		return;
4887 
4888 	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
4889 	if (!(tmp & PFIT_ENABLE))
4890 		return;
4891 
4892 	/* Check whether the pfit is attached to our pipe. */
4893 	if (DISPLAY_VER(dev_priv) < 4) {
4894 		if (crtc->pipe != PIPE_B)
4895 			return;
4896 	} else {
4897 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
4898 			return;
4899 	}
4900 
4901 	crtc_state->gmch_pfit.control = tmp;
4902 	crtc_state->gmch_pfit.pgm_ratios =
4903 		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
4904 }
4905 
4906 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
4907 			       struct intel_crtc_state *pipe_config)
4908 {
4909 	struct drm_device *dev = crtc->base.dev;
4910 	struct drm_i915_private *dev_priv = to_i915(dev);
4911 	enum pipe pipe = crtc->pipe;
4912 	struct dpll clock;
4913 	u32 mdiv;
4914 	int refclk = 100000;
4915 
4916 	/* In case of DSI, DPLL will not be used */
4917 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4918 		return;
4919 
4920 	vlv_dpio_get(dev_priv);
4921 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
4922 	vlv_dpio_put(dev_priv);
4923 
4924 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
4925 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
4926 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
4927 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
4928 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
4929 
4930 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
4931 }
4932 
4933 static void chv_crtc_clock_get(struct intel_crtc *crtc,
4934 			       struct intel_crtc_state *pipe_config)
4935 {
4936 	struct drm_device *dev = crtc->base.dev;
4937 	struct drm_i915_private *dev_priv = to_i915(dev);
4938 	enum pipe pipe = crtc->pipe;
4939 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
4940 	struct dpll clock;
4941 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
4942 	int refclk = 100000;
4943 
4944 	/* In case of DSI, DPLL will not be used */
4945 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4946 		return;
4947 
4948 	vlv_dpio_get(dev_priv);
4949 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
4950 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
4951 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
4952 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
4953 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
4954 	vlv_dpio_put(dev_priv);
4955 
4956 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
4957 	clock.m2 = (pll_dw0 & 0xff) << 22;
4958 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
4959 		clock.m2 |= pll_dw2 & 0x3fffff;
4960 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
4961 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
4962 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
4963 
4964 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
4965 }
4966 
4967 static enum intel_output_format
4968 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
4969 {
4970 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4971 	u32 tmp;
4972 
4973 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
4974 
4975 	if (tmp & PIPEMISC_YUV420_ENABLE) {
4976 		/* We support 4:2:0 in full blend mode only */
4977 		drm_WARN_ON(&dev_priv->drm,
4978 			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
4979 
4980 		return INTEL_OUTPUT_FORMAT_YCBCR420;
4981 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
4982 		return INTEL_OUTPUT_FORMAT_YCBCR444;
4983 	} else {
4984 		return INTEL_OUTPUT_FORMAT_RGB;
4985 	}
4986 }
4987 
4988 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
4989 {
4990 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4991 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
4992 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4993 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4994 	u32 tmp;
4995 
4996 	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
4997 
4998 	if (tmp & DISPPLANE_GAMMA_ENABLE)
4999 		crtc_state->gamma_enable = true;
5000 
5001 	if (!HAS_GMCH(dev_priv) &&
5002 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
5003 		crtc_state->csc_enable = true;
5004 }
5005 
5006 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5007 				 struct intel_crtc_state *pipe_config)
5008 {
5009 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5010 	enum intel_display_power_domain power_domain;
5011 	intel_wakeref_t wakeref;
5012 	u32 tmp;
5013 	bool ret;
5014 
5015 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5016 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5017 	if (!wakeref)
5018 		return false;
5019 
5020 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5021 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5022 	pipe_config->shared_dpll = NULL;
5023 
5024 	ret = false;
5025 
5026 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5027 	if (!(tmp & PIPECONF_ENABLE))
5028 		goto out;
5029 
5030 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5031 	    IS_CHERRYVIEW(dev_priv)) {
5032 		switch (tmp & PIPECONF_BPC_MASK) {
5033 		case PIPECONF_6BPC:
5034 			pipe_config->pipe_bpp = 18;
5035 			break;
5036 		case PIPECONF_8BPC:
5037 			pipe_config->pipe_bpp = 24;
5038 			break;
5039 		case PIPECONF_10BPC:
5040 			pipe_config->pipe_bpp = 30;
5041 			break;
5042 		default:
5043 			break;
5044 		}
5045 	}
5046 
5047 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
5048 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
5049 		pipe_config->limited_color_range = true;
5050 
5051 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
5052 		PIPECONF_GAMMA_MODE_SHIFT;
5053 
5054 	if (IS_CHERRYVIEW(dev_priv))
5055 		pipe_config->cgm_mode = intel_de_read(dev_priv,
5056 						      CGM_PIPE_MODE(crtc->pipe));
5057 
5058 	i9xx_get_pipe_color_config(pipe_config);
5059 	intel_color_get_config(pipe_config);
5060 
5061 	if (DISPLAY_VER(dev_priv) < 4)
5062 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5063 
5064 	intel_get_transcoder_timings(crtc, pipe_config);
5065 	intel_get_pipe_src_size(crtc, pipe_config);
5066 
5067 	i9xx_get_pfit_config(pipe_config);
5068 
5069 	if (DISPLAY_VER(dev_priv) >= 4) {
5070 		/* No way to read it out on pipes B and C */
5071 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
5072 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
5073 		else
5074 			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
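		/* The UDI multiplier field stores the multiplier minus one. */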
5075 		pipe_config->pixel_multiplier =
5076 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
5077 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
5078 		pipe_config->dpll_hw_state.dpll_md = tmp;
5079 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5080 		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
5081 		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
5082 		pipe_config->pixel_multiplier =
5083 			((tmp & SDVO_MULTIPLIER_MASK)
5084 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
5085 	} else {
5086 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
5087 		 * port and will be fixed up in the encoder->get_config
5088 		 * function. */
5089 		pipe_config->pixel_multiplier = 1;
5090 	}
5091 	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
5092 							DPLL(crtc->pipe));
5093 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
5094 		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
5095 							       FP0(crtc->pipe));
5096 		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
5097 							       FP1(crtc->pipe));
5098 	} else {
5099 		/* Mask out read-only status bits. */
5100 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5101 						     DPLL_PORTC_READY_MASK |
5102 						     DPLL_PORTB_READY_MASK);
5103 	}
5104 
5105 	if (IS_CHERRYVIEW(dev_priv))
5106 		chv_crtc_clock_get(crtc, pipe_config);
5107 	else if (IS_VALLEYVIEW(dev_priv))
5108 		vlv_crtc_clock_get(crtc, pipe_config);
5109 	else
5110 		i9xx_crtc_clock_get(crtc, pipe_config);
5111 
5112 	/*
5113 	 * Normally the dotclock is filled in by the encoder .get_config()
5114 	 * but in case the pipe is enabled w/o any ports we need a sane
5115 	 * default.
5116 	 */
5117 	pipe_config->hw.adjusted_mode.crtc_clock =
5118 		pipe_config->port_clock / pipe_config->pixel_multiplier;
5119 
5120 	ret = true;
5121 
5122 out:
5123 	intel_display_power_put(dev_priv, power_domain, wakeref);
5124 
5125 	return ret;
5126 }
5127 
5128 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
5129 {
5130 	struct intel_encoder *encoder;
5131 	int i;
5132 	u32 val, final;
5133 	bool has_lvds = false;
5134 	bool has_cpu_edp = false;
5135 	bool has_panel = false;
5136 	bool has_ck505 = false;
5137 	bool can_ssc = false;
5138 	bool using_ssc_source = false;
5139 
5140 	/* We need to take the global config into account */
5141 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5142 		switch (encoder->type) {
5143 		case INTEL_OUTPUT_LVDS:
5144 			has_panel = true;
5145 			has_lvds = true;
5146 			break;
5147 		case INTEL_OUTPUT_EDP:
5148 			has_panel = true;
5149 			if (encoder->port == PORT_A)
5150 				has_cpu_edp = true;
5151 			break;
5152 		default:
5153 			break;
5154 		}
5155 	}
5156 
5157 	if (HAS_PCH_IBX(dev_priv)) {
5158 		has_ck505 = dev_priv->vbt.display_clock_mode;
5159 		can_ssc = has_ck505;
5160 	} else {
5161 		has_ck505 = false;
5162 		can_ssc = true;
5163 	}
5164 
5165 	/* Check if any DPLLs are using the SSC source */
5166 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
5167 		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
5168 
5169 		if (!(temp & DPLL_VCO_ENABLE))
5170 			continue;
5171 
5172 		if ((temp & PLL_REF_INPUT_MASK) ==
5173 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5174 			using_ssc_source = true;
5175 			break;
5176 		}
5177 	}
5178 
5179 	drm_dbg_kms(&dev_priv->drm,
5180 		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
5181 		    has_panel, has_lvds, has_ck505, using_ssc_source);
5182 
	/* Ironlake: try to set up the display reference clock before
	 * enabling the DPLLs. This is only under the driver's control
	 * after PCH stepping B; earlier steppings ignore this setting.
	 */
5188 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
5189 
5190 	/* As we must carefully and slowly disable/enable each source in turn,
5191 	 * compute the final state we want first and check if we need to
5192 	 * make any changes at all.
5193 	 */
5194 	final = val;
5195 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
5196 	if (has_ck505)
5197 		final |= DREF_NONSPREAD_CK505_ENABLE;
5198 	else
5199 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
5200 
5201 	final &= ~DREF_SSC_SOURCE_MASK;
5202 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5203 	final &= ~DREF_SSC1_ENABLE;
5204 
5205 	if (has_panel) {
5206 		final |= DREF_SSC_SOURCE_ENABLE;
5207 
5208 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
5209 			final |= DREF_SSC1_ENABLE;
5210 
5211 		if (has_cpu_edp) {
5212 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
5213 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5214 			else
5215 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}
5218 	} else if (using_ssc_source) {
5219 		final |= DREF_SSC_SOURCE_ENABLE;
5220 		final |= DREF_SSC1_ENABLE;
5221 	}
5222 
5223 	if (final == val)
5224 		return;
5225 
5226 	/* Always enable nonspread source */
5227 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
5228 
5229 	if (has_ck505)
5230 		val |= DREF_NONSPREAD_CK505_ENABLE;
5231 	else
5232 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
5233 
5234 	if (has_panel) {
5235 		val &= ~DREF_SSC_SOURCE_MASK;
5236 		val |= DREF_SSC_SOURCE_ENABLE;
5237 
5238 		/* SSC must be turned on before enabling the CPU output  */
5239 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5240 			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
5241 			val |= DREF_SSC1_ENABLE;
		} else {
			val &= ~DREF_SSC1_ENABLE;
		}
5244 
5245 		/* Get SSC going before enabling the outputs */
5246 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5247 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5248 		udelay(200);
5249 
5250 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5251 
5252 		/* Enable CPU source on CPU attached eDP */
5253 		if (has_cpu_edp) {
5254 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5255 				drm_dbg_kms(&dev_priv->drm,
5256 					    "Using SSC on eDP\n");
5257 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else {
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
			}
		} else {
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}
5262 
5263 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5264 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5265 		udelay(200);
5266 	} else {
5267 		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
5268 
5269 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5270 
5271 		/* Turn off CPU output */
5272 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5273 
5274 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5275 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5276 		udelay(200);
5277 
5278 		if (!using_ssc_source) {
5279 			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
5280 
5281 			/* Turn off the SSC source */
5282 			val &= ~DREF_SSC_SOURCE_MASK;
5283 			val |= DREF_SSC_SOURCE_DISABLE;
5284 
5285 			/* Turn off SSC1 */
5286 			val &= ~DREF_SSC1_ENABLE;
5287 
5288 			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5289 			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5290 			udelay(200);
5291 		}
5292 	}
5293 
5294 	BUG_ON(val != final);
5295 }
5296 
5297 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5298 {
5299 	u32 tmp;
5300 
5301 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5302 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5303 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5304 
5305 	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5306 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5307 		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
5308 
5309 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5310 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5311 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5312 
5313 	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5314 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5315 		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
5316 }
5317 
5318 /* WaMPhyProgramming:hsw */
5319 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5320 {
5321 	u32 tmp;
5322 
5323 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5324 	tmp &= ~(0xFF << 24);
5325 	tmp |= (0x12 << 24);
5326 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5327 
5328 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5329 	tmp |= (1 << 11);
5330 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5331 
5332 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5333 	tmp |= (1 << 11);
5334 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5335 
5336 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5337 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5338 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5339 
5340 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5341 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5342 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5343 
5344 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5345 	tmp &= ~(7 << 13);
5346 	tmp |= (5 << 13);
5347 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5348 
5349 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5350 	tmp &= ~(7 << 13);
5351 	tmp |= (5 << 13);
5352 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5353 
5354 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5355 	tmp &= ~0xFF;
5356 	tmp |= 0x1C;
5357 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5358 
5359 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5360 	tmp &= ~0xFF;
5361 	tmp |= 0x1C;
5362 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5363 
5364 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5365 	tmp &= ~(0xFF << 16);
5366 	tmp |= (0x1C << 16);
5367 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5368 
5369 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5370 	tmp &= ~(0xFF << 16);
5371 	tmp |= (0x1C << 16);
5372 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5373 
5374 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5375 	tmp |= (1 << 27);
5376 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5377 
5378 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5379 	tmp |= (1 << 27);
5380 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5381 
5382 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5383 	tmp &= ~(0xF << 28);
5384 	tmp |= (4 << 28);
5385 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5386 
5387 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5388 	tmp &= ~(0xF << 28);
5389 	tmp |= (4 << 28);
5390 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5391 }
5392 
5393 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5394  * Programming" based on the parameters passed:
5395  * - Sequence to enable CLKOUT_DP
5396  * - Sequence to enable CLKOUT_DP without spread
5397  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
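 * (with_fdi selects the FDI sequence and implies with_spread; passing
 * with_spread=false selects the no-spread sequence, as the WARNs below
 * enforce)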
5398  */
5399 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
5400 				 bool with_spread, bool with_fdi)
5401 {
5402 	u32 reg, tmp;
5403 
5404 	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
5405 		     "FDI requires downspread\n"))
5406 		with_spread = true;
5407 	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
5408 		     with_fdi, "LP PCH doesn't have FDI\n"))
5409 		with_fdi = false;
5410 
5411 	mutex_lock(&dev_priv->sb_lock);
5412 
5413 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5414 	tmp &= ~SBI_SSCCTL_DISABLE;
5415 	tmp |= SBI_SSCCTL_PATHALT;
5416 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5417 
5418 	udelay(24);
5419 
5420 	if (with_spread) {
5421 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5422 		tmp &= ~SBI_SSCCTL_PATHALT;
5423 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5424 
5425 		if (with_fdi) {
5426 			lpt_reset_fdi_mphy(dev_priv);
5427 			lpt_program_fdi_mphy(dev_priv);
5428 		}
5429 	}
5430 
5431 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5432 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5433 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5434 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5435 
5436 	mutex_unlock(&dev_priv->sb_lock);
5437 }
5438 
5439 /* Sequence to disable CLKOUT_DP */
5440 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
5441 {
5442 	u32 reg, tmp;
5443 
5444 	mutex_lock(&dev_priv->sb_lock);
5445 
5446 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5447 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5448 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5449 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5450 
5451 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5452 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
5453 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
5454 			tmp |= SBI_SSCCTL_PATHALT;
5455 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5456 			udelay(32);
5457 		}
5458 		tmp |= SBI_SSCCTL_DISABLE;
5459 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5460 	}
5461 
5462 	mutex_unlock(&dev_priv->sb_lock);
5463 }
5464 
5465 #define BEND_IDX(steps) ((50 + (steps)) / 5)
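/* e.g. BEND_IDX(-50) == 0, BEND_IDX(0) == 10, BEND_IDX(50) == 20 */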
5466 
5467 static const u16 sscdivintphase[] = {
5468 	[BEND_IDX( 50)] = 0x3B23,
5469 	[BEND_IDX( 45)] = 0x3B23,
5470 	[BEND_IDX( 40)] = 0x3C23,
5471 	[BEND_IDX( 35)] = 0x3C23,
5472 	[BEND_IDX( 30)] = 0x3D23,
5473 	[BEND_IDX( 25)] = 0x3D23,
5474 	[BEND_IDX( 20)] = 0x3E23,
5475 	[BEND_IDX( 15)] = 0x3E23,
5476 	[BEND_IDX( 10)] = 0x3F23,
5477 	[BEND_IDX(  5)] = 0x3F23,
5478 	[BEND_IDX(  0)] = 0x0025,
5479 	[BEND_IDX( -5)] = 0x0025,
5480 	[BEND_IDX(-10)] = 0x0125,
5481 	[BEND_IDX(-15)] = 0x0125,
5482 	[BEND_IDX(-20)] = 0x0225,
5483 	[BEND_IDX(-25)] = 0x0225,
5484 	[BEND_IDX(-30)] = 0x0325,
5485 	[BEND_IDX(-35)] = 0x0325,
5486 	[BEND_IDX(-40)] = 0x0425,
5487 	[BEND_IDX(-45)] = 0x0425,
5488 	[BEND_IDX(-50)] = 0x0525,
5489 };
5490 
5491 /*
5492  * Bend CLKOUT_DP
5493  * steps -50 to 50 inclusive, in steps of 5
5494  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5495  * change in clock period = -(steps / 10) * 5.787 ps
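 * e.g. steps = +50 shortens the clock period by (50 / 10) * 5.787 ps = 28.935 ps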
5496  */
5497 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5498 {
5499 	u32 tmp;
5500 	int idx = BEND_IDX(steps);
5501 
5502 	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
5503 		return;
5504 
5505 	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
5506 		return;
5507 
5508 	mutex_lock(&dev_priv->sb_lock);
5509 
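	/* Steps that are odd multiples of 5 need the dithered phase pattern. */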
5510 	if (steps % 10 != 0)
5511 		tmp = 0xAAAAAAAB;
5512 	else
5513 		tmp = 0x00000000;
5514 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
5515 
5516 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
5517 	tmp &= 0xffff0000;
5518 	tmp |= sscdivintphase[idx];
5519 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
5520 
5521 	mutex_unlock(&dev_priv->sb_lock);
5522 }
5523 
5524 #undef BEND_IDX
5525 
5526 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5527 {
5528 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5529 	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5530 
5531 	if ((ctl & SPLL_PLL_ENABLE) == 0)
5532 		return false;
5533 
5534 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
5535 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5536 		return true;
5537 
5538 	if (IS_BROADWELL(dev_priv) &&
5539 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
5540 		return true;
5541 
5542 	return false;
5543 }
5544 
5545 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
5546 			       enum intel_dpll_id id)
5547 {
5548 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5549 	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
5550 
5551 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
5552 		return false;
5553 
5554 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
5555 		return true;
5556 
5557 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
5558 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
5559 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5560 		return true;
5561 
5562 	return false;
5563 }
5564 
5565 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
5566 {
5567 	struct intel_encoder *encoder;
5568 	bool has_fdi = false;
5569 
5570 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5571 		switch (encoder->type) {
5572 		case INTEL_OUTPUT_ANALOG:
5573 			has_fdi = true;
5574 			break;
5575 		default:
5576 			break;
5577 		}
5578 	}
5579 
5580 	/*
5581 	 * The BIOS may have decided to use the PCH SSC
5582 	 * reference so we must not disable it until the
5583 	 * relevant PLLs have stopped relying on it. We'll
5584 	 * just leave the PCH SSC reference enabled in case
5585 	 * any active PLL is using it. It will get disabled
5586 	 * after runtime suspend if we don't have FDI.
5587 	 *
5588 	 * TODO: Move the whole reference clock handling
5589 	 * to the modeset sequence proper so that we can
5590 	 * actually enable/disable/reconfigure these things
5591 	 * safely. To do that we need to introduce a real
5592 	 * clock hierarchy. That would also allow us to do
5593 	 * clock bending finally.
5594 	 */
5595 	dev_priv->pch_ssc_use = 0;
5596 
5597 	if (spll_uses_pch_ssc(dev_priv)) {
5598 		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
5599 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
5600 	}
5601 
5602 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
5603 		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
5604 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
5605 	}
5606 
5607 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
5608 		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
5609 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
5610 	}
5611 
5612 	if (dev_priv->pch_ssc_use)
5613 		return;
5614 
5615 	if (has_fdi) {
5616 		lpt_bend_clkout_dp(dev_priv, 0);
5617 		lpt_enable_clkout_dp(dev_priv, true, true);
5618 	} else {
5619 		lpt_disable_clkout_dp(dev_priv);
5620 	}
5621 }
5622 
5623 /*
5624  * Initialize reference clocks when the driver loads
5625  */
5626 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
5627 {
5628 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
5629 		ilk_init_pch_refclk(dev_priv);
5630 	else if (HAS_PCH_LPT(dev_priv))
5631 		lpt_init_pch_refclk(dev_priv);
5632 }
5633 
5634 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
5635 {
5636 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5637 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5638 	enum pipe pipe = crtc->pipe;
5639 	u32 val;
5640 
5641 	val = 0;
5642 
5643 	switch (crtc_state->pipe_bpp) {
5644 	case 18:
5645 		val |= PIPECONF_6BPC;
5646 		break;
5647 	case 24:
5648 		val |= PIPECONF_8BPC;
5649 		break;
5650 	case 30:
5651 		val |= PIPECONF_10BPC;
5652 		break;
5653 	case 36:
5654 		val |= PIPECONF_12BPC;
5655 		break;
5656 	default:
5657 		/* Case prevented by intel_choose_pipe_bpp_dither. */
5658 		BUG();
5659 	}
5660 
5661 	if (crtc_state->dither)
5662 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5663 
5664 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5665 		val |= PIPECONF_INTERLACED_ILK;
5666 	else
5667 		val |= PIPECONF_PROGRESSIVE;
5668 
5669 	/*
5670 	 * This would end up with an odd purple hue over
5671 	 * the entire display. Make sure we don't do it.
5672 	 */
5673 	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
5674 		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
5675 
5676 	if (crtc_state->limited_color_range &&
5677 	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5678 		val |= PIPECONF_COLOR_RANGE_SELECT;
5679 
5680 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5681 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
5682 
5683 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
5684 
5685 	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
5686 
5687 	intel_de_write(dev_priv, PIPECONF(pipe), val);
5688 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
5689 }
5690 
5691 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
5692 {
5693 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5694 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5695 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5696 	u32 val = 0;
5697 
5698 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
5699 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5700 
5701 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5702 		val |= PIPECONF_INTERLACED_ILK;
5703 	else
5704 		val |= PIPECONF_PROGRESSIVE;
5705 
5706 	if (IS_HASWELL(dev_priv) &&
5707 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5708 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
5709 
5710 	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
5711 	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
5712 }
5713 
5714 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
5715 {
5716 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5717 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5718 	u32 val = 0;
5719 
5720 	switch (crtc_state->pipe_bpp) {
5721 	case 18:
5722 		val |= PIPEMISC_DITHER_6_BPC;
5723 		break;
5724 	case 24:
5725 		val |= PIPEMISC_DITHER_8_BPC;
5726 		break;
5727 	case 30:
5728 		val |= PIPEMISC_DITHER_10_BPC;
5729 		break;
5730 	case 36:
5731 		val |= PIPEMISC_DITHER_12_BPC;
5732 		break;
5733 	default:
5734 		MISSING_CASE(crtc_state->pipe_bpp);
5735 		break;
5736 	}
5737 
5738 	if (crtc_state->dither)
5739 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
5740 
5741 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
5742 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
5743 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
5744 
5745 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5746 		val |= PIPEMISC_YUV420_ENABLE |
5747 			PIPEMISC_YUV420_MODE_FULL_BLEND;
5748 
5749 	if (DISPLAY_VER(dev_priv) >= 11 &&
5750 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
5751 					   BIT(PLANE_CURSOR))) == 0)
5752 		val |= PIPEMISC_HDR_MODE_PRECISION;
5753 
5754 	if (DISPLAY_VER(dev_priv) >= 12)
5755 		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
5756 
5757 	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
5758 }
5759 
5760 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
5761 {
5762 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5763 	u32 tmp;
5764 
5765 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5766 
5767 	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
5768 	case PIPEMISC_DITHER_6_BPC:
5769 		return 18;
5770 	case PIPEMISC_DITHER_8_BPC:
5771 		return 24;
5772 	case PIPEMISC_DITHER_10_BPC:
5773 		return 30;
5774 	case PIPEMISC_DITHER_12_BPC:
5775 		return 36;
5776 	default:
5777 		MISSING_CASE(tmp);
5778 		return 0;
5779 	}
5780 }
5781 
5782 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
5783 {
5784 	/*
5785 	 * Account for spread spectrum to avoid
5786 	 * oversubscribing the link. Max center spread
5787 	 * is 2.5%; use 5% for safety's sake.
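	 * e.g. a 148500 kHz, 24 bpp mode on a 270000 kHz link:
	 * 148500 * 24 * 21 / 20 = 3742200 kbps, so
	 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.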
5788 	 */
5789 	u32 bps = target_clock * bpp * 21 / 20;
5790 	return DIV_ROUND_UP(bps, link_bw * 8);
5791 }
5792 
5793 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5794 					 struct intel_link_m_n *m_n)
5795 {
5796 	struct drm_device *dev = crtc->base.dev;
5797 	struct drm_i915_private *dev_priv = to_i915(dev);
5798 	enum pipe pipe = crtc->pipe;
5799 
5800 	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
5801 	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
5802 	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5803 		& ~TU_SIZE_MASK;
5804 	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
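	/* The hardware TU size field is programmed as the TU size minus one. */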
5805 	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5806 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5807 }
5808 
5809 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
5810 					 enum transcoder transcoder,
5811 					 struct intel_link_m_n *m_n,
5812 					 struct intel_link_m_n *m2_n2)
5813 {
5814 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5815 	enum pipe pipe = crtc->pipe;
5816 
5817 	if (DISPLAY_VER(dev_priv) >= 5) {
5818 		m_n->link_m = intel_de_read(dev_priv,
5819 					    PIPE_LINK_M1(transcoder));
5820 		m_n->link_n = intel_de_read(dev_priv,
5821 					    PIPE_LINK_N1(transcoder));
5822 		m_n->gmch_m = intel_de_read(dev_priv,
5823 					    PIPE_DATA_M1(transcoder))
5824 			& ~TU_SIZE_MASK;
5825 		m_n->gmch_n = intel_de_read(dev_priv,
5826 					    PIPE_DATA_N1(transcoder));
5827 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
5828 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5829 
5830 		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
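			/* M2/N2 hold the alternate link parameters (e.g. for DRRS) */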
5831 			m2_n2->link_m = intel_de_read(dev_priv,
5832 						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
						      PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
						      PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
						      PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5842 		}
5843 	} else {
5844 		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
5845 		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
5846 		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5847 			& ~TU_SIZE_MASK;
5848 		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
5849 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5850 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5851 	}
5852 }
5853 
5854 void intel_dp_get_m_n(struct intel_crtc *crtc,
5855 		      struct intel_crtc_state *pipe_config)
5856 {
5857 	if (pipe_config->has_pch_encoder)
5858 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5859 	else
5860 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5861 					     &pipe_config->dp_m_n,
5862 					     &pipe_config->dp_m2_n2);
5863 }
5864 
5865 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
5866 				   struct intel_crtc_state *pipe_config)
5867 {
5868 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5869 				     &pipe_config->fdi_m_n, NULL);
5870 }
5871 
5872 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
5873 				  u32 pos, u32 size)
5874 {
5875 	drm_rect_init(&crtc_state->pch_pfit.dst,
5876 		      pos >> 16, pos & 0xffff,
5877 		      size >> 16, size & 0xffff);
5878 }
5879 
5880 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
5881 {
5882 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5883 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5884 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
5885 	int id = -1;
5886 	int i;
5887 
5888 	/* find scaler attached to this pipe */
5889 	for (i = 0; i < crtc->num_scalers; i++) {
5890 		u32 ctl, pos, size;
5891 
5892 		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
5893 		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
5894 			continue;
5895 
5896 		id = i;
5897 		crtc_state->pch_pfit.enabled = true;
5898 
5899 		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
5900 		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
5901 
5902 		ilk_get_pfit_pos_size(crtc_state, pos, size);
5903 
5904 		scaler_state->scalers[i].in_use = true;
5905 		break;
5906 	}
5907 
5908 	scaler_state->scaler_id = id;
5909 	if (id >= 0)
5910 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
5911 	else
5912 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
5913 }
5914 
5915 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
5916 {
5917 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5918 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5919 	u32 ctl, pos, size;
5920 
5921 	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
5922 	if ((ctl & PF_ENABLE) == 0)
5923 		return;
5924 
5925 	crtc_state->pch_pfit.enabled = true;
5926 
5927 	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
5928 	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
5929 
5930 	ilk_get_pfit_pos_size(crtc_state, pos, size);
5931 
5932 	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiate them) so just WARN about this case for now.
5936 	 */
5937 	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
5938 		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
5939 }
5940 
5941 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
5942 				struct intel_crtc_state *pipe_config)
5943 {
5944 	struct drm_device *dev = crtc->base.dev;
5945 	struct drm_i915_private *dev_priv = to_i915(dev);
5946 	enum intel_display_power_domain power_domain;
5947 	intel_wakeref_t wakeref;
5948 	u32 tmp;
5949 	bool ret;
5950 
5951 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5952 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5953 	if (!wakeref)
5954 		return false;
5955 
5956 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5957 	pipe_config->shared_dpll = NULL;
5958 
5959 	ret = false;
5960 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5961 	if (!(tmp & PIPECONF_ENABLE))
5962 		goto out;
5963 
5964 	switch (tmp & PIPECONF_BPC_MASK) {
5965 	case PIPECONF_6BPC:
5966 		pipe_config->pipe_bpp = 18;
5967 		break;
5968 	case PIPECONF_8BPC:
5969 		pipe_config->pipe_bpp = 24;
5970 		break;
5971 	case PIPECONF_10BPC:
5972 		pipe_config->pipe_bpp = 30;
5973 		break;
5974 	case PIPECONF_12BPC:
5975 		pipe_config->pipe_bpp = 36;
5976 		break;
5977 	default:
5978 		break;
5979 	}
5980 
5981 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
5982 		pipe_config->limited_color_range = true;
5983 
5984 	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
5985 	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
5986 	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
5987 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
5988 		break;
5989 	default:
5990 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5991 		break;
5992 	}
5993 
5994 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
5995 		PIPECONF_GAMMA_MODE_SHIFT;
5996 
5997 	pipe_config->csc_mode = intel_de_read(dev_priv,
5998 					      PIPE_CSC_MODE(crtc->pipe));
5999 
6000 	i9xx_get_pipe_color_config(pipe_config);
6001 	intel_color_get_config(pipe_config);
6002 
6003 	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
6004 		struct intel_shared_dpll *pll;
6005 		enum intel_dpll_id pll_id;
6006 		bool pll_active;
6007 
6008 		pipe_config->has_pch_encoder = true;
6009 
6010 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
6011 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6012 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
6013 
6014 		ilk_get_fdi_m_n_config(crtc, pipe_config);
6015 
6016 		if (HAS_PCH_IBX(dev_priv)) {
6017 			/*
6018 			 * The pipe->pch transcoder and pch transcoder->pll
6019 			 * mapping is fixed.
6020 			 */
6021 			pll_id = (enum intel_dpll_id) crtc->pipe;
6022 		} else {
6023 			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
6024 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
6025 				pll_id = DPLL_ID_PCH_PLL_B;
6026 			else
				pll_id = DPLL_ID_PCH_PLL_A;
6028 		}
6029 
6030 		pipe_config->shared_dpll =
6031 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
6032 		pll = pipe_config->shared_dpll;
6033 
6034 		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6035 						     &pipe_config->dpll_hw_state);
6036 		drm_WARN_ON(dev, !pll_active);
6037 
6038 		tmp = pipe_config->dpll_hw_state.dpll;
6039 		pipe_config->pixel_multiplier =
6040 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
6041 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6042 
6043 		ilk_pch_clock_get(crtc, pipe_config);
6044 	} else {
6045 		pipe_config->pixel_multiplier = 1;
6046 	}
6047 
6048 	intel_get_transcoder_timings(crtc, pipe_config);
6049 	intel_get_pipe_src_size(crtc, pipe_config);
6050 
6051 	ilk_get_pfit_config(pipe_config);
6052 
6053 	ret = true;
6054 
6055 out:
6056 	intel_display_power_put(dev_priv, power_domain, wakeref);
6057 
6058 	return ret;
6059 }
6060 
6061 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
6062 				     struct intel_crtc_state *pipe_config,
6063 				     struct intel_display_power_domain_set *power_domain_set)
6064 {
6065 	struct drm_device *dev = crtc->base.dev;
6066 	struct drm_i915_private *dev_priv = to_i915(dev);
6067 	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
6068 	unsigned long enabled_panel_transcoders = 0;
6069 	enum transcoder panel_transcoder;
6070 	u32 tmp;
6071 
6072 	if (DISPLAY_VER(dev_priv) >= 11)
6073 		panel_transcoder_mask |=
6074 			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
6075 
6076 	/*
6077 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
6078 	 * and DSI transcoders handled below.
6079 	 */
6080 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6081 
6082 	/*
6083 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in an always-on power well).
6085 	 */
6086 	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
6087 				       panel_transcoder_mask) {
6088 		bool force_thru = false;
6089 		enum pipe trans_pipe;
6090 
6091 		tmp = intel_de_read(dev_priv,
6092 				    TRANS_DDI_FUNC_CTL(panel_transcoder));
6093 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6094 			continue;
6095 
6096 		/*
6097 		 * Log all enabled ones, only use the first one.
6098 		 *
6099 		 * FIXME: This won't work for two separate DSI displays.
6100 		 */
6101 		enabled_panel_transcoders |= BIT(panel_transcoder);
6102 		if (enabled_panel_transcoders != BIT(panel_transcoder))
6103 			continue;
6104 
6105 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
6106 		default:
6107 			drm_WARN(dev, 1,
6108 				 "unknown pipe linked to transcoder %s\n",
6109 				 transcoder_name(panel_transcoder));
6110 			fallthrough;
6111 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
6112 			force_thru = true;
6113 			fallthrough;
6114 		case TRANS_DDI_EDP_INPUT_A_ON:
6115 			trans_pipe = PIPE_A;
6116 			break;
6117 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
6118 			trans_pipe = PIPE_B;
6119 			break;
6120 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
6121 			trans_pipe = PIPE_C;
6122 			break;
6123 		case TRANS_DDI_EDP_INPUT_D_ONOFF:
6124 			trans_pipe = PIPE_D;
6125 			break;
6126 		}
6127 
6128 		if (trans_pipe == crtc->pipe) {
6129 			pipe_config->cpu_transcoder = panel_transcoder;
6130 			pipe_config->pch_pfit.force_thru = force_thru;
6131 		}
6132 	}
6133 
6134 	/*
6135 	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
6136 	 */
6137 	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
6138 		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));
6139 
6140 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6141 						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
6142 		return false;
6143 
6144 	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
6145 
6146 	return tmp & PIPECONF_ENABLE;
6147 }
6148 
6149 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
6150 					 struct intel_crtc_state *pipe_config,
6151 					 struct intel_display_power_domain_set *power_domain_set)
6152 {
6153 	struct drm_device *dev = crtc->base.dev;
6154 	struct drm_i915_private *dev_priv = to_i915(dev);
6155 	enum transcoder cpu_transcoder;
6156 	enum port port;
6157 	u32 tmp;
6158 
6159 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
6160 		if (port == PORT_A)
6161 			cpu_transcoder = TRANSCODER_DSI_A;
6162 		else
6163 			cpu_transcoder = TRANSCODER_DSI_C;
6164 
6165 		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6166 							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
6167 			continue;
6168 
6169 		/*
6170 		 * The PLL needs to be enabled with a valid divider
6171 		 * configuration, otherwise accessing DSI registers will hang
6172 		 * the machine. See BSpec North Display Engine
6173 		 * registers/MIPI[BXT]. We can break out here early, since we
6174 		 * need the same DSI PLL to be enabled for both DSI ports.
6175 		 */
6176 		if (!bxt_dsi_pll_is_enabled(dev_priv))
6177 			break;
6178 
6179 		/* XXX: this works for video mode only */
6180 		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
6181 		if (!(tmp & DPI_ENABLE))
6182 			continue;
6183 
6184 		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
6185 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
6186 			continue;
6187 
6188 		pipe_config->cpu_transcoder = cpu_transcoder;
6189 		break;
6190 	}
6191 
6192 	return transcoder_is_dsi(pipe_config->cpu_transcoder);
6193 }
6194 
6195 static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
6196 				   struct intel_crtc_state *pipe_config)
6197 {
6198 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6199 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6200 	enum port port;
6201 	u32 tmp;
6202 
6203 	if (transcoder_is_dsi(cpu_transcoder)) {
6204 		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
6205 						PORT_A : PORT_B;
6206 	} else {
6207 		tmp = intel_de_read(dev_priv,
6208 				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
6209 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6210 			return;
6211 		if (DISPLAY_VER(dev_priv) >= 12)
6212 			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6213 		else
6214 			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6215 	}
6216 
6217 	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
6220 	 * the PCH transcoder is on.
6221 	 */
6222 	if (DISPLAY_VER(dev_priv) < 9 &&
6223 	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
6224 		pipe_config->has_pch_encoder = true;
6225 
6226 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
6227 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6228 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
6229 
6230 		ilk_get_fdi_m_n_config(crtc, pipe_config);
6231 	}
6232 }
6233 
6234 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
6235 				struct intel_crtc_state *pipe_config)
6236 {
6237 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6238 	struct intel_display_power_domain_set power_domain_set = { };
6239 	bool active;
6240 	u32 tmp;
6241 
6242 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6243 						       POWER_DOMAIN_PIPE(crtc->pipe)))
6244 		return false;
6245 
6246 	pipe_config->shared_dpll = NULL;
6247 
6248 	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
6249 
6250 	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
6251 	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
6252 		drm_WARN_ON(&dev_priv->drm, active);
6253 		active = true;
6254 	}
6255 
6256 	intel_dsc_get_config(pipe_config);
6257 	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
6258 		intel_uncompressed_joiner_get_config(pipe_config);
6259 
6260 	if (!active) {
6261 		/* bigjoiner slave doesn't enable transcoder */
6262 		if (!pipe_config->bigjoiner_slave)
6263 			goto out;
6264 
6265 		active = true;
6266 		pipe_config->pixel_multiplier = 1;
6267 
		/* we cannot read out most state, so don't bother. */
6269 		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
6270 	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
		   DISPLAY_VER(dev_priv) >= 11) {
6272 		hsw_get_ddi_port_state(crtc, pipe_config);
6273 		intel_get_transcoder_timings(crtc, pipe_config);
6274 	}
6275 
6276 	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
6277 		intel_vrr_get_config(crtc, pipe_config);
6278 
6279 	intel_get_pipe_src_size(crtc, pipe_config);
6280 
6281 	if (IS_HASWELL(dev_priv)) {
6282 		u32 tmp = intel_de_read(dev_priv,
6283 					PIPECONF(pipe_config->cpu_transcoder));
6284 
6285 		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
6286 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6287 		else
6288 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6289 	} else {
6290 		pipe_config->output_format =
6291 			bdw_get_pipemisc_output_format(crtc);
6292 	}
6293 
6294 	pipe_config->gamma_mode = intel_de_read(dev_priv,
6295 						GAMMA_MODE(crtc->pipe));
6296 
6297 	pipe_config->csc_mode = intel_de_read(dev_priv,
6298 					      PIPE_CSC_MODE(crtc->pipe));
6299 
6300 	if (DISPLAY_VER(dev_priv) >= 9) {
6301 		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
6302 
6303 		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
6304 			pipe_config->gamma_enable = true;
6305 
6306 		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
6307 			pipe_config->csc_enable = true;
6308 	} else {
6309 		i9xx_get_pipe_color_config(pipe_config);
6310 	}
6311 
6312 	intel_color_get_config(pipe_config);
6313 
6314 	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
6315 	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
6316 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6317 		pipe_config->ips_linetime =
6318 			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
6319 
6320 	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6321 						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
6322 		if (DISPLAY_VER(dev_priv) >= 9)
6323 			skl_get_pfit_config(pipe_config);
6324 		else
6325 			ilk_get_pfit_config(pipe_config);
6326 	}
6327 
6328 	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv)) {
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		} else {
6333 			/*
			 * We cannot read out the IPS state on Broadwell; set
			 * it to true so we can force a defined state on the
			 * first commit.
6337 			 */
6338 			pipe_config->ips_enabled = true;
6339 		}
6340 	}
6341 
6342 	if (pipe_config->bigjoiner_slave) {
6343 		/* Cannot be read out as a slave, set to 0. */
6344 		pipe_config->pixel_multiplier = 0;
6345 	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
		   !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
6347 		pipe_config->pixel_multiplier =
6348 			intel_de_read(dev_priv,
6349 				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
6350 	} else {
6351 		pipe_config->pixel_multiplier = 1;
6352 	}
6353 
6354 out:
6355 	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
6356 
6357 	return active;
6358 }
6359 
6360 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6361 {
6362 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6363 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6364 
6365 	if (!i915->display.get_pipe_config(crtc, crtc_state))
6366 		return false;
6367 
6368 	crtc_state->hw.active = true;
6369 
6370 	intel_crtc_readout_derived_state(crtc_state);
6371 
6372 	return true;
6373 }
6374 
6375 /* VESA 640x480x72Hz mode to set on the pipe */
6376 static const struct drm_display_mode load_detect_mode = {
6377 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6378 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6379 };
6380 
6381 struct drm_framebuffer *
6382 intel_framebuffer_create(struct drm_i915_gem_object *obj,
6383 			 struct drm_mode_fb_cmd2 *mode_cmd)
6384 {
6385 	struct intel_framebuffer *intel_fb;
6386 	int ret;
6387 
6388 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6389 	if (!intel_fb)
6390 		return ERR_PTR(-ENOMEM);
6391 
6392 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
6393 	if (ret)
6394 		goto err;
6395 
6396 	return &intel_fb->base;
6397 
6398 err:
6399 	kfree(intel_fb);
6400 	return ERR_PTR(ret);
6401 }
6402 
6403 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6404 					struct drm_crtc *crtc)
6405 {
6406 	struct drm_plane *plane;
6407 	struct drm_plane_state *plane_state;
6408 	int ret, i;
6409 
6410 	ret = drm_atomic_add_affected_planes(state, crtc);
6411 	if (ret)
6412 		return ret;
6413 
6414 	for_each_new_plane_in_state(state, plane, plane_state, i) {
6415 		if (plane_state->crtc != crtc)
6416 			continue;
6417 
6418 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6419 		if (ret)
6420 			return ret;
6421 
6422 		drm_atomic_set_fb_for_plane(plane_state, NULL);
6423 	}
6424 
6425 	return 0;
6426 }
6427 
6428 int intel_get_load_detect_pipe(struct drm_connector *connector,
6429 			       struct intel_load_detect_pipe *old,
6430 			       struct drm_modeset_acquire_ctx *ctx)
6431 {
6432 	struct intel_crtc *intel_crtc;
6433 	struct intel_encoder *intel_encoder =
6434 		intel_attached_encoder(to_intel_connector(connector));
6435 	struct drm_crtc *possible_crtc;
6436 	struct drm_encoder *encoder = &intel_encoder->base;
6437 	struct drm_crtc *crtc = NULL;
6438 	struct drm_device *dev = encoder->dev;
6439 	struct drm_i915_private *dev_priv = to_i915(dev);
6440 	struct drm_mode_config *config = &dev->mode_config;
6441 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
6442 	struct drm_connector_state *connector_state;
6443 	struct intel_crtc_state *crtc_state;
6444 	int ret, i = -1;
6445 
6446 	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6447 		    connector->base.id, connector->name,
6448 		    encoder->base.id, encoder->name);
6449 
6450 	old->restore_state = NULL;
6451 
6452 	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
6453 
6454 	/*
6455 	 * Algorithm gets a little messy:
6456 	 *
6457 	 *   - if the connector already has an assigned crtc, use it (but make
6458 	 *     sure it's on first)
6459 	 *
6460 	 *   - try to find the first unused crtc that can drive this connector,
6461 	 *     and use that if we find one
6462 	 */
6463 
6464 	/* See if we already have a CRTC for this connector */
6465 	if (connector->state->crtc) {
6466 		crtc = connector->state->crtc;
6467 
6468 		ret = drm_modeset_lock(&crtc->mutex, ctx);
6469 		if (ret)
6470 			goto fail;
6471 
6472 		/* Make sure the crtc and connector are running */
6473 		goto found;
6474 	}
6475 
6476 	/* Find an unused one (if possible) */
6477 	for_each_crtc(dev, possible_crtc) {
6478 		i++;
6479 		if (!(encoder->possible_crtcs & (1 << i)))
6480 			continue;
6481 
6482 		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
6483 		if (ret)
6484 			goto fail;
6485 
6486 		if (possible_crtc->state->enable) {
6487 			drm_modeset_unlock(&possible_crtc->mutex);
6488 			continue;
6489 		}
6490 
6491 		crtc = possible_crtc;
6492 		break;
6493 	}
6494 
6495 	/*
6496 	 * If we didn't find an unused CRTC, don't use any.
6497 	 */
6498 	if (!crtc) {
6499 		drm_dbg_kms(&dev_priv->drm,
6500 			    "no pipe available for load-detect\n");
6501 		ret = -ENODEV;
6502 		goto fail;
6503 	}
6504 
6505 found:
6506 	intel_crtc = to_intel_crtc(crtc);
6507 
6508 	state = drm_atomic_state_alloc(dev);
6509 	restore_state = drm_atomic_state_alloc(dev);
6510 	if (!state || !restore_state) {
6511 		ret = -ENOMEM;
6512 		goto fail;
6513 	}
6514 
6515 	state->acquire_ctx = ctx;
6516 	restore_state->acquire_ctx = ctx;
6517 
6518 	connector_state = drm_atomic_get_connector_state(state, connector);
6519 	if (IS_ERR(connector_state)) {
6520 		ret = PTR_ERR(connector_state);
6521 		goto fail;
6522 	}
6523 
6524 	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
6525 	if (ret)
6526 		goto fail;
6527 
6528 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6529 	if (IS_ERR(crtc_state)) {
6530 		ret = PTR_ERR(crtc_state);
6531 		goto fail;
6532 	}
6533 
6534 	crtc_state->uapi.active = true;
6535 
6536 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
6537 					   &load_detect_mode);
6538 	if (ret)
6539 		goto fail;
6540 
6541 	ret = intel_modeset_disable_planes(state, crtc);
6542 	if (ret)
6543 		goto fail;
6544 
6545 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
6546 	if (!ret)
6547 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
6548 	if (!ret)
6549 		ret = drm_atomic_add_affected_planes(restore_state, crtc);
6550 	if (ret) {
6551 		drm_dbg_kms(&dev_priv->drm,
6552 			    "Failed to create a copy of old state to restore: %i\n",
6553 			    ret);
6554 		goto fail;
6555 	}
6556 
6557 	ret = drm_atomic_commit(state);
6558 	if (ret) {
6559 		drm_dbg_kms(&dev_priv->drm,
6560 			    "failed to set mode on load-detect pipe\n");
6561 		goto fail;
6562 	}
6563 
6564 	old->restore_state = restore_state;
6565 	drm_atomic_state_put(state);
6566 
6567 	/* let the connector get through one full cycle before testing */
6568 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
6569 	return true;
6570 
6571 fail:
6572 	if (state) {
6573 		drm_atomic_state_put(state);
6574 		state = NULL;
6575 	}
6576 	if (restore_state) {
6577 		drm_atomic_state_put(restore_state);
6578 		restore_state = NULL;
6579 	}
6580 
6581 	if (ret == -EDEADLK)
6582 		return ret;
6583 
6584 	return false;
6585 }
6586 
6587 void intel_release_load_detect_pipe(struct drm_connector *connector,
6588 				    struct intel_load_detect_pipe *old,
6589 				    struct drm_modeset_acquire_ctx *ctx)
6590 {
6591 	struct intel_encoder *intel_encoder =
6592 		intel_attached_encoder(to_intel_connector(connector));
6593 	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
6594 	struct drm_encoder *encoder = &intel_encoder->base;
6595 	struct drm_atomic_state *state = old->restore_state;
6596 	int ret;
6597 
6598 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6599 		    connector->base.id, connector->name,
6600 		    encoder->base.id, encoder->name);
6601 
6602 	if (!state)
6603 		return;
6604 
6605 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
6606 	if (ret)
6607 		drm_dbg_kms(&i915->drm,
6608 			    "Couldn't release load detect pipe: %i\n", ret);
6609 	drm_atomic_state_put(state);
6610 }
6611 
6612 static int i9xx_pll_refclk(struct drm_device *dev,
6613 			   const struct intel_crtc_state *pipe_config)
6614 {
6615 	struct drm_i915_private *dev_priv = to_i915(dev);
6616 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6617 
6618 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
6619 		return dev_priv->vbt.lvds_ssc_freq;
6620 	else if (HAS_PCH_SPLIT(dev_priv))
6621 		return 120000;
6622 	else if (DISPLAY_VER(dev_priv) != 2)
6623 		return 96000;
6624 	else
6625 		return 48000;
6626 }
6627 
6628 /* Returns the clock of the currently programmed mode of the given pipe. */
6629 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
6630 				struct intel_crtc_state *pipe_config)
6631 {
6632 	struct drm_device *dev = crtc->base.dev;
6633 	struct drm_i915_private *dev_priv = to_i915(dev);
6634 	enum pipe pipe = crtc->pipe;
6635 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6636 	u32 fp;
6637 	struct dpll clock;
6638 	int port_clock;
6639 	int refclk = i9xx_pll_refclk(dev, pipe_config);
6640 
6641 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6642 		fp = pipe_config->dpll_hw_state.fp0;
6643 	else
6644 		fp = pipe_config->dpll_hw_state.fp1;
6645 
6646 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6647 	if (IS_PINEVIEW(dev_priv)) {
6648 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6649 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6650 	} else {
6651 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6652 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6653 	}
6654 
6655 	if (DISPLAY_VER(dev_priv) != 2) {
6656 		if (IS_PINEVIEW(dev_priv))
6657 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6658 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6659 		else
6660 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6661 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
6662 
6663 		switch (dpll & DPLL_MODE_MASK) {
6664 		case DPLLB_MODE_DAC_SERIAL:
6665 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6666 				5 : 10;
6667 			break;
6668 		case DPLLB_MODE_LVDS:
6669 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6670 				7 : 14;
6671 			break;
6672 		default:
6673 			drm_dbg_kms(&dev_priv->drm,
6674 				    "Unknown DPLL mode %08x in programmed "
6675 				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
6676 			return;
6677 		}
6678 
6679 		if (IS_PINEVIEW(dev_priv))
6680 			port_clock = pnv_calc_dpll_params(refclk, &clock);
6681 		else
6682 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
6683 	} else {
6684 		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
6685 								 LVDS);
6686 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
6687 
6688 		if (is_lvds) {
6689 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6690 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
6691 
6692 			if (lvds & LVDS_CLKB_POWER_UP)
6693 				clock.p2 = 7;
6694 			else
6695 				clock.p2 = 14;
6696 		} else {
6697 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
6698 				clock.p1 = 2;
6699 			else {
6700 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6701 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6702 			}
6703 			if (dpll & PLL_P2_DIVIDE_BY_4)
6704 				clock.p2 = 4;
6705 			else
6706 				clock.p2 = 2;
6707 		}
6708 
6709 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
6710 	}
6711 
6712 	/*
6713 	 * This value includes pixel_multiplier. We will use
6714 	 * port_clock to compute adjusted_mode.crtc_clock in the
6715 	 * encoder's get_config() function.
6716 	 */
6717 	pipe_config->port_clock = port_clock;
6718 }
6719 
6720 int intel_dotclock_calculate(int link_freq,
6721 			     const struct intel_link_m_n *m_n)
6722 {
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the dot clock computed here is simpler:
	 * pixel_clock = (link_m * link_clock) / link_n
	 */
6732 
6733 	if (!m_n->link_n)
6734 		return 0;
6735 
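	/*
	 * Illustrative example (hypothetical values): link_m = 2,
	 * link_n = 5 on a 270000 kHz link gives (2 * 270000) / 5 =
	 * 108000 kHz. mul_u32_u32() keeps the intermediate product in
	 * 64 bits so the multiplication cannot overflow.
	 */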
6736 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6737 }
6738 
6739 static void ilk_pch_clock_get(struct intel_crtc *crtc,
6740 			      struct intel_crtc_state *pipe_config)
6741 {
6742 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6743 
6744 	/* read out port_clock from the DPLL */
6745 	i9xx_crtc_clock_get(crtc, pipe_config);
6746 
	/*
	 * In case there is an active pipe without active ports,
	 * we still need an estimate of the dotclock.
	 * Calculate one based on the FDI configuration.
	 */
6752 	pipe_config->hw.adjusted_mode.crtc_clock =
6753 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6754 					 &pipe_config->fdi_m_n);
6755 }
6756 
6757 /* Returns the currently programmed mode of the given encoder. */
6758 struct drm_display_mode *
6759 intel_encoder_current_mode(struct intel_encoder *encoder)
6760 {
6761 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6762 	struct intel_crtc_state *crtc_state;
6763 	struct drm_display_mode *mode;
6764 	struct intel_crtc *crtc;
6765 	enum pipe pipe;
6766 
6767 	if (!encoder->get_hw_state(encoder, &pipe))
6768 		return NULL;
6769 
6770 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6771 
6772 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6773 	if (!mode)
6774 		return NULL;
6775 
6776 	crtc_state = intel_crtc_state_alloc(crtc);
6777 	if (!crtc_state) {
6778 		kfree(mode);
6779 		return NULL;
6780 	}
6781 
6782 	if (!intel_crtc_get_pipe_config(crtc_state)) {
6783 		kfree(crtc_state);
6784 		kfree(mode);
6785 		return NULL;
6786 	}
6787 
6788 	intel_encoder_get_config(encoder, crtc_state);
6789 
6790 	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
6791 
6792 	kfree(crtc_state);
6793 
6794 	return mode;
6795 }
6796 
/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @cur: current plane state
 * @new: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns: %true if the watermarks need to be recalculated, %false otherwise.
 */
6807 static bool intel_wm_need_update(const struct intel_plane_state *cur,
6808 				 struct intel_plane_state *new)
6809 {
	/* Update watermarks on visibility, tiling, rotation or size changes. */
6811 	if (new->uapi.visible != cur->uapi.visible)
6812 		return true;
6813 
6814 	if (!cur->hw.fb || !new->hw.fb)
6815 		return false;
6816 
6817 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
6818 	    cur->hw.rotation != new->hw.rotation ||
6819 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
6820 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
6821 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
6822 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
6823 		return true;
6824 
6825 	return false;
6826 }
6827 
6828 static bool needs_scaling(const struct intel_plane_state *state)
6829 {
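	/* uapi.src is in 16.16 fixed point, uapi.dst in whole pixels */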
6830 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
6831 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
6832 	int dst_w = drm_rect_width(&state->uapi.dst);
6833 	int dst_h = drm_rect_height(&state->uapi.dst);
6834 
6835 	return (src_w != dst_w || src_h != dst_h);
6836 }
6837 
6838 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
6839 				    struct intel_crtc_state *crtc_state,
6840 				    const struct intel_plane_state *old_plane_state,
6841 				    struct intel_plane_state *plane_state)
6842 {
6843 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6844 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
6845 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6846 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
6847 	bool was_crtc_enabled = old_crtc_state->hw.active;
6848 	bool is_crtc_enabled = crtc_state->hw.active;
6849 	bool turn_off, turn_on, visible, was_visible;
6850 	int ret;
6851 
6852 	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
6853 		ret = skl_update_scaler_plane(crtc_state, plane_state);
6854 		if (ret)
6855 			return ret;
6856 	}
6857 
6858 	was_visible = old_plane_state->uapi.visible;
6859 	visible = plane_state->uapi.visible;
6860 
6861 	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
6862 		was_visible = false;
6863 
6864 	/*
6865 	 * Visibility is calculated as if the crtc was on, but
6866 	 * after scaler setup everything depends on it being off
6867 	 * when the crtc isn't active.
6868 	 *
6869 	 * FIXME this is wrong for watermarks. Watermarks should also
6870 	 * be computed as if the pipe would be active. Perhaps move
6871 	 * per-plane wm computation to the .check_plane() hook, and
6872 	 * only combine the results from all planes in the current place?
6873 	 */
6874 	if (!is_crtc_enabled) {
6875 		intel_plane_set_invisible(crtc_state, plane_state);
6876 		visible = false;
6877 	}
6878 
6879 	if (!was_visible && !visible)
6880 		return 0;
6881 
6882 	turn_off = was_visible && (!visible || mode_changed);
6883 	turn_on = visible && (!was_visible || mode_changed);
6884 
6885 	drm_dbg_atomic(&dev_priv->drm,
6886 		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
6887 		       crtc->base.base.id, crtc->base.name,
6888 		       plane->base.base.id, plane->base.name,
6889 		       was_visible, visible,
6890 		       turn_off, turn_on, mode_changed);
6891 
6892 	if (turn_on) {
6893 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
6894 			crtc_state->update_wm_pre = true;
6895 
6896 		/* must disable cxsr around plane enable/disable */
6897 		if (plane->id != PLANE_CURSOR)
6898 			crtc_state->disable_cxsr = true;
6899 	} else if (turn_off) {
6900 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
6901 			crtc_state->update_wm_post = true;
6902 
6903 		/* must disable cxsr around plane enable/disable */
6904 		if (plane->id != PLANE_CURSOR)
6905 			crtc_state->disable_cxsr = true;
6906 	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
6907 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
6908 			/* FIXME bollocks */
6909 			crtc_state->update_wm_pre = true;
6910 			crtc_state->update_wm_post = true;
6911 		}
6912 	}
6913 
6914 	if (visible || was_visible)
6915 		crtc_state->fb_bits |= plane->frontbuffer_bit;
6916 
6917 	/*
6918 	 * ILK/SNB DVSACNTR/Sprite Enable
6919 	 * IVB SPR_CTL/Sprite Enable
6920 	 * "When in Self Refresh Big FIFO mode, a write to enable the
6921 	 *  plane will be internally buffered and delayed while Big FIFO
6922 	 *  mode is exiting."
6923 	 *
6924 	 * Which means that enabling the sprite can take an extra frame
6925 	 * when we start in big FIFO mode (LP1+). Thus we need to drop
6926 	 * down to LP0 and wait for vblank in order to make sure the
6927 	 * sprite gets enabled on the next vblank after the register write.
6928 	 * Doing otherwise would risk enabling the sprite one frame after
6929 	 * we've already signalled flip completion. We can resume LP1+
6930 	 * once the sprite has been enabled.
	 *
6933 	 * WaCxSRDisabledForSpriteScaling:ivb
6934 	 * IVB SPR_SCALE/Scaling Enable
6935 	 * "Low Power watermarks must be disabled for at least one
6936 	 *  frame before enabling sprite scaling, and kept disabled
6937 	 *  until sprite scaling is disabled."
6938 	 *
6939 	 * ILK/SNB DVSASCALE/Scaling Enable
6940 	 * "When in Self Refresh Big FIFO mode, scaling enable will be
6941 	 *  masked off while Big FIFO mode is exiting."
6942 	 *
6943 	 * Despite the w/a only being listed for IVB we assume that
6944 	 * the ILK/SNB note has similar ramifications, hence we apply
6945 	 * the w/a on all three platforms.
6946 	 *
	 * Experimental results suggest this is needed also for the primary
	 * plane, not only the sprite plane.
6949 	 */
6950 	if (plane->id != PLANE_CURSOR &&
6951 	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
6952 	     IS_IVYBRIDGE(dev_priv)) &&
6953 	    (turn_on || (!needs_scaling(old_plane_state) &&
6954 			 needs_scaling(plane_state))))
6955 		crtc_state->disable_lp_wm = true;
6956 
6957 	return 0;
6958 }
6959 
6960 static bool encoders_cloneable(const struct intel_encoder *a,
6961 			       const struct intel_encoder *b)
6962 {
6963 	/* masks could be asymmetric, so check both ways */
6964 	return a == b || (a->cloneable & (1 << b->type) &&
6965 			  b->cloneable & (1 << a->type));
6966 }
6967 
6968 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
6969 					 struct intel_crtc *crtc,
6970 					 struct intel_encoder *encoder)
6971 {
6972 	struct intel_encoder *source_encoder;
6973 	struct drm_connector *connector;
6974 	struct drm_connector_state *connector_state;
6975 	int i;
6976 
6977 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6978 		if (connector_state->crtc != &crtc->base)
6979 			continue;
6980 
6981 		source_encoder =
6982 			to_intel_encoder(connector_state->best_encoder);
6983 		if (!encoders_cloneable(encoder, source_encoder))
6984 			return false;
6985 	}
6986 
6987 	return true;
6988 }
6989 
6990 static int icl_add_linked_planes(struct intel_atomic_state *state)
6991 {
6992 	struct intel_plane *plane, *linked;
6993 	struct intel_plane_state *plane_state, *linked_plane_state;
6994 	int i;
6995 
6996 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6997 		linked = plane_state->planar_linked_plane;
6998 
6999 		if (!linked)
7000 			continue;
7001 
7002 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
7003 		if (IS_ERR(linked_plane_state))
7004 			return PTR_ERR(linked_plane_state);
7005 
7006 		drm_WARN_ON(state->base.dev,
7007 			    linked_plane_state->planar_linked_plane != plane);
7008 		drm_WARN_ON(state->base.dev,
7009 			    linked_plane_state->planar_slave == plane_state->planar_slave);
7010 	}
7011 
7012 	return 0;
7013 }
7014 
7015 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
7016 {
7017 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7018 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7019 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
7020 	struct intel_plane *plane, *linked;
7021 	struct intel_plane_state *plane_state;
7022 	int i;
7023 
7024 	if (DISPLAY_VER(dev_priv) < 11)
7025 		return 0;
7026 
7027 	/*
7028 	 * Destroy all old plane links and make the slave plane invisible
7029 	 * in the crtc_state->active_planes mask.
7030 	 */
7031 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7032 		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
7033 			continue;
7034 
7035 		plane_state->planar_linked_plane = NULL;
7036 		if (plane_state->planar_slave && !plane_state->uapi.visible) {
7037 			crtc_state->enabled_planes &= ~BIT(plane->id);
7038 			crtc_state->active_planes &= ~BIT(plane->id);
7039 			crtc_state->update_planes |= BIT(plane->id);
7040 		}
7041 
7042 		plane_state->planar_slave = false;
7043 	}
7044 
7045 	if (!crtc_state->nv12_planes)
7046 		return 0;
7047 
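	/* Pair each plane that needs NV12 with a free Y plane on this pipe. */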
7048 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7049 		struct intel_plane_state *linked_state = NULL;
7050 
7051 		if (plane->pipe != crtc->pipe ||
7052 		    !(crtc_state->nv12_planes & BIT(plane->id)))
7053 			continue;
7054 
7055 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
7056 			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
7057 				continue;
7058 
7059 			if (crtc_state->active_planes & BIT(linked->id))
7060 				continue;
7061 
7062 			linked_state = intel_atomic_get_plane_state(state, linked);
7063 			if (IS_ERR(linked_state))
7064 				return PTR_ERR(linked_state);
7065 
7066 			break;
7067 		}
7068 
7069 		if (!linked_state) {
7070 			drm_dbg_kms(&dev_priv->drm,
7071 				    "Need %d free Y planes for planar YUV\n",
7072 				    hweight8(crtc_state->nv12_planes));
7073 
7074 			return -EINVAL;
7075 		}
7076 
7077 		plane_state->planar_linked_plane = linked;
7078 
7079 		linked_state->planar_slave = true;
7080 		linked_state->planar_linked_plane = plane;
7081 		crtc_state->enabled_planes |= BIT(linked->id);
7082 		crtc_state->active_planes |= BIT(linked->id);
7083 		crtc_state->update_planes |= BIT(linked->id);
7084 		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
7085 			    linked->base.name, plane->base.name);
7086 
7087 		/* Copy parameters to slave plane */
7088 		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
7089 		linked_state->color_ctl = plane_state->color_ctl;
7090 		linked_state->view = plane_state->view;
7091 
7092 		intel_plane_copy_hw_state(linked_state, plane_state);
7093 		linked_state->uapi.src = plane_state->uapi.src;
7094 		linked_state->uapi.dst = plane_state->uapi.dst;
7095 
7096 		if (icl_is_hdr_plane(dev_priv, plane->id)) {
7097 			if (linked->id == PLANE_SPRITE5)
7098 				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
7099 			else if (linked->id == PLANE_SPRITE4)
7100 				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
7101 			else if (linked->id == PLANE_SPRITE3)
7102 				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
7103 			else if (linked->id == PLANE_SPRITE2)
7104 				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
7105 			else
7106 				MISSING_CASE(linked->id);
7107 		}
7108 	}
7109 
7110 	return 0;
7111 }
7112 
7113 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
7114 {
7115 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7116 	struct intel_atomic_state *state =
7117 		to_intel_atomic_state(new_crtc_state->uapi.state);
7118 	const struct intel_crtc_state *old_crtc_state =
7119 		intel_atomic_get_old_crtc_state(state, crtc);
7120 
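	/* !x != !y is a boolean XOR: did the pipe gain or lose C8 planes? */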
7121 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
7122 }
7123 
7124 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
7125 {
7126 	const struct drm_display_mode *pipe_mode =
7127 		&crtc_state->hw.pipe_mode;
7128 	int linetime_wm;
7129 
7130 	if (!crtc_state->hw.enable)
7131 		return 0;
7132 
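	/*
	 * htotal * 1000 / crtc_clock (kHz) is the line time in us;
	 * the extra * 8 suggests the register field is in 1/8 us
	 * units, clamped to 9 bits below.
	 */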
7133 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7134 					pipe_mode->crtc_clock);
7135 
7136 	return min(linetime_wm, 0x1ff);
7137 }
7138 
7139 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
7140 			       const struct intel_cdclk_state *cdclk_state)
7141 {
7142 	const struct drm_display_mode *pipe_mode =
7143 		&crtc_state->hw.pipe_mode;
7144 	int linetime_wm;
7145 
7146 	if (!crtc_state->hw.enable)
7147 		return 0;
7148 
7149 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7150 					cdclk_state->logical.cdclk);
7151 
7152 	return min(linetime_wm, 0x1ff);
7153 }
7154 
7155 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
7156 {
7157 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7158 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7159 	const struct drm_display_mode *pipe_mode =
7160 		&crtc_state->hw.pipe_mode;
7161 	int linetime_wm;
7162 
7163 	if (!crtc_state->hw.enable)
7164 		return 0;
7165 
7166 	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
7167 				   crtc_state->pixel_rate);
7168 
7169 	/* Display WA #1135: BXT:ALL GLK:ALL */
7170 	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
7171 	    dev_priv->ipc_enabled)
7172 		linetime_wm /= 2;
7173 
7174 	return min(linetime_wm, 0x1ff);
7175 }
7176 
7177 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
7178 				   struct intel_crtc *crtc)
7179 {
7180 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7181 	struct intel_crtc_state *crtc_state =
7182 		intel_atomic_get_new_crtc_state(state, crtc);
7183 	const struct intel_cdclk_state *cdclk_state;
7184 
7185 	if (DISPLAY_VER(dev_priv) >= 9)
7186 		crtc_state->linetime = skl_linetime_wm(crtc_state);
7187 	else
7188 		crtc_state->linetime = hsw_linetime_wm(crtc_state);
7189 
7190 	if (!hsw_crtc_supports_ips(crtc))
7191 		return 0;
7192 
7193 	cdclk_state = intel_atomic_get_cdclk_state(state);
7194 	if (IS_ERR(cdclk_state))
7195 		return PTR_ERR(cdclk_state);
7196 
7197 	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
7198 						       cdclk_state);
7199 
7200 	return 0;
7201 }
7202 
7203 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
7204 				   struct intel_crtc *crtc)
7205 {
7206 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7207 	struct intel_crtc_state *crtc_state =
7208 		intel_atomic_get_new_crtc_state(state, crtc);
7209 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
7210 	int ret;
7211 
7212 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
7213 	    mode_changed && !crtc_state->hw.active)
7214 		crtc_state->update_wm_post = true;
7215 
7216 	if (mode_changed && crtc_state->hw.enable &&
7217 	    dev_priv->display.crtc_compute_clock &&
7218 	    !crtc_state->bigjoiner_slave &&
7219 	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
7220 		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
7221 		if (ret)
7222 			return ret;
7223 	}
7224 
7225 	/*
7226 	 * May need to update pipe gamma enable bits
7227 	 * when C8 planes are getting enabled/disabled.
7228 	 */
7229 	if (c8_planes_changed(crtc_state))
7230 		crtc_state->uapi.color_mgmt_changed = true;
7231 
7232 	if (mode_changed || crtc_state->update_pipe ||
7233 	    crtc_state->uapi.color_mgmt_changed) {
7234 		ret = intel_color_check(crtc_state);
7235 		if (ret)
7236 			return ret;
7237 	}
7238 
7239 	if (dev_priv->display.compute_pipe_wm) {
7240 		ret = dev_priv->display.compute_pipe_wm(crtc_state);
7241 		if (ret) {
7242 			drm_dbg_kms(&dev_priv->drm,
7243 				    "Target pipe watermarks are invalid\n");
7244 			return ret;
7245 		}
7246 	}
7247 
7248 	if (dev_priv->display.compute_intermediate_wm) {
7249 		if (drm_WARN_ON(&dev_priv->drm,
7250 				!dev_priv->display.compute_pipe_wm))
7251 			return 0;
7252 
7253 		/*
7254 		 * Calculate 'intermediate' watermarks that satisfy both the
7255 		 * old state and the new state.  We can program these
7256 		 * immediately.
7257 		 */
7258 		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
7259 		if (ret) {
7260 			drm_dbg_kms(&dev_priv->drm,
7261 				    "No valid intermediate pipe watermarks are possible\n");
7262 			return ret;
7263 		}
7264 	}
7265 
7266 	if (DISPLAY_VER(dev_priv) >= 9) {
7267 		if (mode_changed || crtc_state->update_pipe) {
7268 			ret = skl_update_scaler_crtc(crtc_state);
7269 			if (ret)
7270 				return ret;
7271 		}
7272 
7273 		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
7274 		if (ret)
7275 			return ret;
7276 	}
7277 
7278 	if (HAS_IPS(dev_priv)) {
7279 		ret = hsw_compute_ips_config(crtc_state);
7280 		if (ret)
7281 			return ret;
7282 	}
7283 
7284 	if (DISPLAY_VER(dev_priv) >= 9 ||
7285 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
7286 		ret = hsw_compute_linetime_wm(state, crtc);
7287 		if (ret)
7288 			return ret;
	}
7291 
7292 	if (!mode_changed) {
7293 		ret = intel_psr2_sel_fetch_update(state, crtc);
7294 		if (ret)
7295 			return ret;
7296 	}
7297 
7298 	return 0;
7299 }
7300 
7301 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
7302 {
7303 	struct intel_connector *connector;
7304 	struct drm_connector_list_iter conn_iter;
7305 
7306 	drm_connector_list_iter_begin(dev, &conn_iter);
7307 	for_each_intel_connector_iter(connector, &conn_iter) {
7308 		struct drm_connector_state *conn_state = connector->base.state;
7309 		struct intel_encoder *encoder =
7310 			to_intel_encoder(connector->base.encoder);
7311 
7312 		if (conn_state->crtc)
7313 			drm_connector_put(&connector->base);
7314 
7315 		if (encoder) {
7316 			struct intel_crtc *crtc =
7317 				to_intel_crtc(encoder->base.crtc);
7318 			const struct intel_crtc_state *crtc_state =
7319 				to_intel_crtc_state(crtc->base.state);
7320 
7321 			conn_state->best_encoder = &encoder->base;
7322 			conn_state->crtc = &crtc->base;
7323 			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
7324 
7325 			drm_connector_get(&connector->base);
7326 		} else {
7327 			conn_state->best_encoder = NULL;
7328 			conn_state->crtc = NULL;
7329 		}
7330 	}
7331 	drm_connector_list_iter_end(&conn_iter);
7332 }
7333 
7334 static int
7335 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
7336 		      struct intel_crtc_state *pipe_config)
7337 {
7338 	struct drm_connector *connector = conn_state->connector;
7339 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7340 	const struct drm_display_info *info = &connector->display_info;
7341 	int bpp;
7342 
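	/* Round max_bpc down to the nearest pipe bpc the hw supports (6/8/10/12). */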
7343 	switch (conn_state->max_bpc) {
7344 	case 6 ... 7:
7345 		bpp = 6 * 3;
7346 		break;
7347 	case 8 ... 9:
7348 		bpp = 8 * 3;
7349 		break;
7350 	case 10 ... 11:
7351 		bpp = 10 * 3;
7352 		break;
7353 	case 12 ... 16:
7354 		bpp = 12 * 3;
7355 		break;
7356 	default:
7357 		MISSING_CASE(conn_state->max_bpc);
7358 		return -EINVAL;
7359 	}
7360 
7361 	if (bpp < pipe_config->pipe_bpp) {
7362 		drm_dbg_kms(&i915->drm,
7363 			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
7364 			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
7365 			    connector->base.id, connector->name,
7366 			    bpp, 3 * info->bpc,
7367 			    3 * conn_state->max_requested_bpc,
7368 			    pipe_config->pipe_bpp);
7369 
7370 		pipe_config->pipe_bpp = bpp;
7371 	}
7372 
7373 	return 0;
7374 }
7375 
7376 static int
7377 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7378 			  struct intel_crtc_state *pipe_config)
7379 {
7380 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7381 	struct drm_atomic_state *state = pipe_config->uapi.state;
7382 	struct drm_connector *connector;
7383 	struct drm_connector_state *connector_state;
7384 	int bpp, i;
7385 
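	/* Start from the platform max: 30 bpp on G4X/VLV/CHV, 36 on ILK+, 24 otherwise. */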
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv))
7388 		bpp = 10*3;
7389 	else if (DISPLAY_VER(dev_priv) >= 5)
7390 		bpp = 12*3;
7391 	else
7392 		bpp = 8*3;
7393 
7394 	pipe_config->pipe_bpp = bpp;
7395 
7396 	/* Clamp display bpp to connector max bpp */
7397 	for_each_new_connector_in_state(state, connector, connector_state, i) {
7398 		int ret;
7399 
7400 		if (connector_state->crtc != &crtc->base)
7401 			continue;
7402 
7403 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7404 		if (ret)
7405 			return ret;
7406 	}
7407 
7408 	return 0;
7409 }
7410 
7411 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
7412 				    const struct drm_display_mode *mode)
7413 {
7414 	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
7415 		    "type: 0x%x flags: 0x%x\n",
7416 		    mode->crtc_clock,
7417 		    mode->crtc_hdisplay, mode->crtc_hsync_start,
7418 		    mode->crtc_hsync_end, mode->crtc_htotal,
7419 		    mode->crtc_vdisplay, mode->crtc_vsync_start,
7420 		    mode->crtc_vsync_end, mode->crtc_vtotal,
7421 		    mode->type, mode->flags);
7422 }
7423 
7424 static void
7425 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
7426 		      const char *id, unsigned int lane_count,
7427 		      const struct intel_link_m_n *m_n)
7428 {
7429 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7430 
7431 	drm_dbg_kms(&i915->drm,
7432 		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
7433 		    id, lane_count,
7434 		    m_n->gmch_m, m_n->gmch_n,
7435 		    m_n->link_m, m_n->link_n, m_n->tu);
7436 }
7437 
7438 static void
7439 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7440 		     const union hdmi_infoframe *frame)
7441 {
7442 	if (!drm_debug_enabled(DRM_UT_KMS))
7443 		return;
7444 
7445 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7446 }
7447 
7448 static void
7449 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7450 		      const struct drm_dp_vsc_sdp *vsc)
7451 {
7452 	if (!drm_debug_enabled(DRM_UT_KMS))
7453 		return;
7454 
7455 	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7456 }
7457 
7458 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
7459 
7460 static const char * const output_type_str[] = {
7461 	OUTPUT_TYPE(UNUSED),
7462 	OUTPUT_TYPE(ANALOG),
7463 	OUTPUT_TYPE(DVO),
7464 	OUTPUT_TYPE(SDVO),
7465 	OUTPUT_TYPE(LVDS),
7466 	OUTPUT_TYPE(TVOUT),
7467 	OUTPUT_TYPE(HDMI),
7468 	OUTPUT_TYPE(DP),
7469 	OUTPUT_TYPE(EDP),
7470 	OUTPUT_TYPE(DSI),
7471 	OUTPUT_TYPE(DDI),
7472 	OUTPUT_TYPE(DP_MST),
7473 };
7474 
7475 #undef OUTPUT_TYPE
7476 
7477 static void snprintf_output_types(char *buf, size_t len,
7478 				  unsigned int output_types)
7479 {
7480 	char *str = buf;
7481 	int i;
7482 
7483 	str[0] = '\0';
7484 
7485 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
7486 		int r;
7487 
7488 		if ((output_types & BIT(i)) == 0)
7489 			continue;
7490 
7491 		r = snprintf(str, len, "%s%s",
7492 			     str != buf ? "," : "", output_type_str[i]);
7493 		if (r >= len)
7494 			break;
7495 		str += r;
7496 		len -= r;
7497 
7498 		output_types &= ~BIT(i);
7499 	}
7500 
7501 	WARN_ON_ONCE(output_types != 0);
7502 }
7503 
7504 static const char * const output_format_str[] = {
7505 	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
7506 	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
7507 	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
7508 };
7509 
7510 static const char *output_formats(enum intel_output_format format)
7511 {
7512 	if (format >= ARRAY_SIZE(output_format_str))
7513 		return "invalid";
7514 	return output_format_str[format];
7515 }
7516 
7517 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
7518 {
7519 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
7520 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
7521 	const struct drm_framebuffer *fb = plane_state->hw.fb;
7522 
7523 	if (!fb) {
7524 		drm_dbg_kms(&i915->drm,
7525 			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
7526 			    plane->base.base.id, plane->base.name,
7527 			    yesno(plane_state->uapi.visible));
7528 		return;
7529 	}
7530 
7531 	drm_dbg_kms(&i915->drm,
7532 		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
7533 		    plane->base.base.id, plane->base.name,
7534 		    fb->base.id, fb->width, fb->height, &fb->format->format,
7535 		    fb->modifier, yesno(plane_state->uapi.visible));
7536 	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
7537 		    plane_state->hw.rotation, plane_state->scaler_id);
7538 	if (plane_state->uapi.visible)
7539 		drm_dbg_kms(&i915->drm,
7540 			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
7541 			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
7542 			    DRM_RECT_ARG(&plane_state->uapi.dst));
7543 }
7544 
7545 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
7546 				   struct intel_atomic_state *state,
7547 				   const char *context)
7548 {
7549 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7550 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7551 	const struct intel_plane_state *plane_state;
7552 	struct intel_plane *plane;
7553 	char buf[64];
7554 	int i;
7555 
7556 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
7557 		    crtc->base.base.id, crtc->base.name,
7558 		    yesno(pipe_config->hw.enable), context);
7559 
7560 	if (!pipe_config->hw.enable)
7561 		goto dump_planes;
7562 
7563 	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
7564 	drm_dbg_kms(&dev_priv->drm,
7565 		    "active: %s, output_types: %s (0x%x), output format: %s\n",
7566 		    yesno(pipe_config->hw.active),
7567 		    buf, pipe_config->output_types,
7568 		    output_formats(pipe_config->output_format));
7569 
7570 	drm_dbg_kms(&dev_priv->drm,
7571 		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
7572 		    transcoder_name(pipe_config->cpu_transcoder),
7573 		    pipe_config->pipe_bpp, pipe_config->dither);
7574 
7575 	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
7576 		    transcoder_name(pipe_config->mst_master_transcoder));
7577 
7578 	drm_dbg_kms(&dev_priv->drm,
7579 		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
7580 		    transcoder_name(pipe_config->master_transcoder),
7581 		    pipe_config->sync_mode_slaves_mask);
7582 
7583 	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
7584 		    pipe_config->bigjoiner_slave ? "slave" :
7585 		    pipe_config->bigjoiner ? "master" : "no");
7586 
7587 	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
7588 		    enableddisabled(pipe_config->splitter.enable),
7589 		    pipe_config->splitter.link_count,
7590 		    pipe_config->splitter.pixel_overlap);
7591 
7592 	if (pipe_config->has_pch_encoder)
7593 		intel_dump_m_n_config(pipe_config, "fdi",
7594 				      pipe_config->fdi_lanes,
7595 				      &pipe_config->fdi_m_n);
7596 
7597 	if (intel_crtc_has_dp_encoder(pipe_config)) {
7598 		intel_dump_m_n_config(pipe_config, "dp m_n",
7599 				pipe_config->lane_count, &pipe_config->dp_m_n);
7600 		if (pipe_config->has_drrs)
7601 			intel_dump_m_n_config(pipe_config, "dp m2_n2",
7602 					      pipe_config->lane_count,
7603 					      &pipe_config->dp_m2_n2);
7604 	}
7605 
7606 	drm_dbg_kms(&dev_priv->drm,
7607 		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
7608 		    pipe_config->has_audio, pipe_config->has_infoframe,
7609 		    pipe_config->infoframes.enable);
7610 
7611 	if (pipe_config->infoframes.enable &
7612 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
7613 		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
7614 			    pipe_config->infoframes.gcp);
7615 	if (pipe_config->infoframes.enable &
7616 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
7617 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
7618 	if (pipe_config->infoframes.enable &
7619 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
7620 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
7621 	if (pipe_config->infoframes.enable &
7622 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
7623 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
7624 	if (pipe_config->infoframes.enable &
7625 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
7626 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
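	/*
	 * Note: the DRM (HDR) infoframe is transmitted in the hardware's
	 * gamut metadata packet (GMP) slot, hence infoframes.drm is
	 * dumped for the GAMUT_METADATA case as well.
	 */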
7627 	if (pipe_config->infoframes.enable &
7628 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
7629 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7630 	if (pipe_config->infoframes.enable &
7631 	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
7632 		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
7633 
7634 	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
7635 		    yesno(pipe_config->vrr.enable),
7636 		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
7637 		    pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
7638 		    intel_vrr_vmin_vblank_start(pipe_config),
7639 		    intel_vrr_vmax_vblank_start(pipe_config));
7640 
7641 	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
7642 	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
7643 	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
7644 	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
7645 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
7646 	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
7647 	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
7648 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
7649 	drm_dbg_kms(&dev_priv->drm,
7650 		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
7651 		    pipe_config->port_clock,
7652 		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
7653 		    pipe_config->pixel_rate);
7654 
7655 	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
7656 		    pipe_config->linetime, pipe_config->ips_linetime);
7657 
7658 	if (DISPLAY_VER(dev_priv) >= 9)
7659 		drm_dbg_kms(&dev_priv->drm,
7660 			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
7661 			    crtc->num_scalers,
7662 			    pipe_config->scaler_state.scaler_users,
7663 			    pipe_config->scaler_state.scaler_id);
7664 
7665 	if (HAS_GMCH(dev_priv))
7666 		drm_dbg_kms(&dev_priv->drm,
7667 			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
7668 			    pipe_config->gmch_pfit.control,
7669 			    pipe_config->gmch_pfit.pgm_ratios,
7670 			    pipe_config->gmch_pfit.lvds_border_bits);
7671 	else
7672 		drm_dbg_kms(&dev_priv->drm,
7673 			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
7674 			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
7675 			    enableddisabled(pipe_config->pch_pfit.enabled),
7676 			    yesno(pipe_config->pch_pfit.force_thru));
7677 
7678 	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
7679 		    pipe_config->ips_enabled, pipe_config->double_wide);
7680 
7681 	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
7682 
7683 	if (IS_CHERRYVIEW(dev_priv))
7684 		drm_dbg_kms(&dev_priv->drm,
7685 			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7686 			    pipe_config->cgm_mode, pipe_config->gamma_mode,
7687 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7688 	else
7689 		drm_dbg_kms(&dev_priv->drm,
7690 			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7691 			    pipe_config->csc_mode, pipe_config->gamma_mode,
7692 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7693 
7694 	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
7695 		    pipe_config->hw.degamma_lut ?
7696 		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
7697 		    pipe_config->hw.gamma_lut ?
7698 		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
7699 
7700 dump_planes:
7701 	if (!state)
7702 		return;
7703 
7704 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7705 		if (plane->pipe == crtc->pipe)
7706 			intel_dump_plane_state(plane_state);
7707 	}
7708 }
7709 
7710 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
7711 {
7712 	struct drm_device *dev = state->base.dev;
7713 	struct drm_connector *connector;
7714 	struct drm_connector_list_iter conn_iter;
7715 	unsigned int used_ports = 0;
7716 	unsigned int used_mst_ports = 0;
7717 	bool ret = true;
7718 
7719 	/*
7720 	 * We're going to peek into connector->state,
7721 	 * hence connection_mutex must be held.
7722 	 */
7723 	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
7724 
7725 	/*
7726 	 * Walk the connector list instead of the encoder
7727 	 * list to detect the problem on ddi platforms
7728 	 * where there's just one encoder per digital port.
7729 	 */
7730 	drm_connector_list_iter_begin(dev, &conn_iter);
7731 	drm_for_each_connector_iter(connector, &conn_iter) {
7732 		struct drm_connector_state *connector_state;
7733 		struct intel_encoder *encoder;
7734 
7735 		connector_state =
7736 			drm_atomic_get_new_connector_state(&state->base,
7737 							   connector);
7738 		if (!connector_state)
7739 			connector_state = connector->state;
7740 
7741 		if (!connector_state->best_encoder)
7742 			continue;
7743 
7744 		encoder = to_intel_encoder(connector_state->best_encoder);
7745 
7746 		drm_WARN_ON(dev, !connector_state->crtc);
7747 
7748 		switch (encoder->type) {
7749 		case INTEL_OUTPUT_DDI:
7750 			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
7751 				break;
7752 			fallthrough;
7753 		case INTEL_OUTPUT_DP:
7754 		case INTEL_OUTPUT_HDMI:
7755 		case INTEL_OUTPUT_EDP:
7756 			/* the same port mustn't appear more than once */
7757 			if (used_ports & BIT(encoder->port))
7758 				ret = false;
7759 
7760 			used_ports |= BIT(encoder->port);
7761 			break;
7762 		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |= BIT(encoder->port);
7765 			break;
7766 		default:
7767 			break;
7768 		}
7769 	}
7770 	drm_connector_list_iter_end(&conn_iter);
7771 
7772 	/* can't mix MST and SST/HDMI on the same port */
7773 	if (used_ports & used_mst_ports)
7774 		return false;
7775 
7776 	return ret;
7777 }
7778 
7779 static void
7780 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
7781 					   struct intel_crtc_state *crtc_state)
7782 {
7783 	const struct intel_crtc_state *from_crtc_state = crtc_state;
7784 
7785 	if (crtc_state->bigjoiner_slave) {
7786 		from_crtc_state = intel_atomic_get_new_crtc_state(state,
7787 								  crtc_state->bigjoiner_linked_crtc);
7788 
7789 		/* No need to copy state if the master state is unchanged */
7790 		if (!from_crtc_state)
7791 			return;
7792 	}
7793 
7794 	intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
7795 }
7796 
7797 static void
7798 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
7799 				 struct intel_crtc_state *crtc_state)
7800 {
7801 	crtc_state->hw.enable = crtc_state->uapi.enable;
7802 	crtc_state->hw.active = crtc_state->uapi.active;
7803 	crtc_state->hw.mode = crtc_state->uapi.mode;
7804 	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
7805 	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
7806 
7807 	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
7808 }
7809 
7810 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
7811 {
7812 	if (crtc_state->bigjoiner_slave)
7813 		return;
7814 
7815 	crtc_state->uapi.enable = crtc_state->hw.enable;
7816 	crtc_state->uapi.active = crtc_state->hw.active;
7817 	drm_WARN_ON(crtc_state->uapi.crtc->dev,
7818 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
7819 
7820 	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
7821 	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
7822 
7823 	/* copy color blobs to uapi */
7824 	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
7825 				  crtc_state->hw.degamma_lut);
7826 	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
7827 				  crtc_state->hw.gamma_lut);
7828 	drm_property_replace_blob(&crtc_state->uapi.ctm,
7829 				  crtc_state->hw.ctm);
7830 }
7831 
7832 static int
7833 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
7834 			  const struct intel_crtc_state *from_crtc_state)
7835 {
7836 	struct intel_crtc_state *saved_state;
7837 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7838 
7839 	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
7840 	if (!saved_state)
7841 		return -ENOMEM;
7842 
7843 	saved_state->uapi = crtc_state->uapi;
7844 	saved_state->scaler_state = crtc_state->scaler_state;
7845 	saved_state->shared_dpll = crtc_state->shared_dpll;
7846 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7847 	saved_state->crc_enabled = crtc_state->crc_enabled;
7848 
7849 	intel_crtc_free_hw_state(crtc_state);
7850 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
7851 	kfree(saved_state);
7852 
7853 	/* Re-init hw state */
7854 	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
7855 	crtc_state->hw.enable = from_crtc_state->hw.enable;
7856 	crtc_state->hw.active = from_crtc_state->hw.active;
7857 	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
7858 	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
7859 
7860 	/* Some fixups */
7861 	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
7862 	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
7863 	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
7864 	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
7865 	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
7866 	crtc_state->bigjoiner_slave = true;
7867 	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
7868 	crtc_state->has_audio = false;
7869 
7870 	return 0;
7871 }
7872 
7873 static int
7874 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
7875 				 struct intel_crtc_state *crtc_state)
7876 {
7877 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7878 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7879 	struct intel_crtc_state *saved_state;
7880 
7881 	saved_state = intel_crtc_state_alloc(crtc);
7882 	if (!saved_state)
7883 		return -ENOMEM;
7884 
7885 	/* free the old crtc_state->hw members */
7886 	intel_crtc_free_hw_state(crtc_state);
7887 
	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known not to cause problems are preserved. */
7892 
7893 	saved_state->uapi = crtc_state->uapi;
7894 	saved_state->scaler_state = crtc_state->scaler_state;
7895 	saved_state->shared_dpll = crtc_state->shared_dpll;
7896 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7897 	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
7898 	       sizeof(saved_state->icl_port_dplls));
7899 	saved_state->crc_enabled = crtc_state->crc_enabled;
7900 	if (IS_G4X(dev_priv) ||
7901 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7902 		saved_state->wm = crtc_state->wm;
7903 
7904 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
7905 	kfree(saved_state);
7906 
7907 	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
7908 
7909 	return 0;
7910 }
7911 
7912 static int
7913 intel_modeset_pipe_config(struct intel_atomic_state *state,
7914 			  struct intel_crtc_state *pipe_config)
7915 {
7916 	struct drm_crtc *crtc = pipe_config->uapi.crtc;
7917 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7918 	struct drm_connector *connector;
7919 	struct drm_connector_state *connector_state;
7920 	int base_bpp, ret, i;
7921 	bool retry = true;
7922 
7923 	pipe_config->cpu_transcoder =
7924 		(enum transcoder) to_intel_crtc(crtc)->pipe;
7925 
	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive nor negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
7931 	if (!(pipe_config->hw.adjusted_mode.flags &
7932 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
7933 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
7934 
7935 	if (!(pipe_config->hw.adjusted_mode.flags &
7936 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
7937 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
7938 
7939 	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
7940 					pipe_config);
7941 	if (ret)
7942 		return ret;
7943 
7944 	base_bpp = pipe_config->pipe_bpp;
7945 
7946 	/*
7947 	 * Determine the real pipe dimensions. Note that stereo modes can
7948 	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frames. This
7950 	 * is stored in the crtc timings. We use the requested mode to do this
7951 	 * computation to clearly distinguish it from the adjusted mode, which
7952 	 * can be changed by the connectors in the below retry loop.
7953 	 */
7954 	drm_mode_get_hv_timing(&pipe_config->hw.mode,
7955 			       &pipe_config->pipe_src_w,
7956 			       &pipe_config->pipe_src_h);
7957 
7958 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7959 		struct intel_encoder *encoder =
7960 			to_intel_encoder(connector_state->best_encoder);
7961 
7962 		if (connector_state->crtc != crtc)
7963 			continue;
7964 
7965 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
7966 			drm_dbg_kms(&i915->drm,
7967 				    "rejecting invalid cloning configuration\n");
7968 			return -EINVAL;
7969 		}
7970 
7971 		/*
7972 		 * Determine output_types before calling the .compute_config()
7973 		 * hooks so that the hooks can use this information safely.
7974 		 */
7975 		if (encoder->compute_output_type)
7976 			pipe_config->output_types |=
7977 				BIT(encoder->compute_output_type(encoder, pipe_config,
7978 								 connector_state));
7979 		else
7980 			pipe_config->output_types |= BIT(encoder->type);
7981 	}
7982 
7983 encoder_retry:
7984 	/* Ensure the port clock defaults are reset when retrying. */
7985 	pipe_config->port_clock = 0;
7986 	pipe_config->pixel_multiplier = 1;
7987 
7988 	/* Fill in default crtc timings, allow encoders to overwrite them. */
7989 	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
7990 			      CRTC_STEREO_DOUBLE);
7991 
7992 	/* Pass our mode to the connectors and the CRTC to give them a chance to
7993 	 * adjust it according to limitations or connector properties, and also
7994 	 * a chance to reject the mode entirely.
7995 	 */
7996 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7997 		struct intel_encoder *encoder =
7998 			to_intel_encoder(connector_state->best_encoder);
7999 
8000 		if (connector_state->crtc != crtc)
8001 			continue;
8002 
8003 		ret = encoder->compute_config(encoder, pipe_config,
8004 					      connector_state);
8005 		if (ret < 0) {
8006 			if (ret != -EDEADLK)
8007 				drm_dbg_kms(&i915->drm,
8008 					    "Encoder config failure: %d\n",
8009 					    ret);
8010 			return ret;
8011 		}
8012 	}
8013 
8014 	/* Set default port clock if not overwritten by the encoder. Needs to be
8015 	 * done afterwards in case the encoder adjusts the mode. */
8016 	if (!pipe_config->port_clock)
8017 		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
8018 			* pipe_config->pixel_multiplier;
8019 
8020 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
8021 	if (ret == -EDEADLK)
8022 		return ret;
8023 	if (ret < 0) {
8024 		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
8025 		return ret;
8026 	}
8027 
8028 	if (ret == I915_DISPLAY_CONFIG_RETRY) {
8029 		if (drm_WARN(&i915->drm, !retry,
8030 			     "loop in pipe configuration computation\n"))
8031 			return -EINVAL;
8032 
8033 		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
8034 		retry = false;
8035 		goto encoder_retry;
8036 	}
8037 
	/* Dithering seems to not pass through bits correctly when it should, so
	 * only enable it on 6bpc panels and when it's not a compliance
	 * test requesting a 6bpc video pattern.
	 */
8042 	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
8043 		!pipe_config->dither_force_disable;
8044 	drm_dbg_kms(&i915->drm,
8045 		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
8046 		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
8047 
8048 	return 0;
8049 }
8050 
8051 static int
8052 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
8053 {
8054 	struct intel_atomic_state *state =
8055 		to_intel_atomic_state(crtc_state->uapi.state);
8056 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8057 	struct drm_connector_state *conn_state;
8058 	struct drm_connector *connector;
8059 	int i;
8060 
8061 	for_each_new_connector_in_state(&state->base, connector,
8062 					conn_state, i) {
8063 		struct intel_encoder *encoder =
8064 			to_intel_encoder(conn_state->best_encoder);
8065 		int ret;
8066 
8067 		if (conn_state->crtc != &crtc->base ||
8068 		    !encoder->compute_config_late)
8069 			continue;
8070 
8071 		ret = encoder->compute_config_late(encoder, crtc_state,
8072 						   conn_state);
8073 		if (ret)
8074 			return ret;
8075 	}
8076 
8077 	return 0;
8078 }
8079 
8080 bool intel_fuzzy_clock_check(int clock1, int clock2)
8081 {
8082 	int diff;
8083 
8084 	if (clock1 == clock2)
8085 		return true;
8086 
8087 	if (!clock1 || !clock2)
8088 		return false;
8089 
8090 	diff = abs(clock1 - clock2);
8091 
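	/*
	 * Accept the clocks as matching if they differ by less than 5%
	 * of their sum, i.e. roughly 10% of either clock.
	 */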
	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
8093 		return true;
8094 
8095 	return false;
8096 }
8097 
8098 static bool
8099 intel_compare_m_n(unsigned int m, unsigned int n,
8100 		  unsigned int m2, unsigned int n2,
8101 		  bool exact)
8102 {
8103 	if (m == m2 && n == n2)
8104 		return true;
8105 
8106 	if (exact || !m || !n || !m2 || !n2)
8107 		return false;
8108 
8109 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
8110 
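	/*
	 * Scale the smaller-denominator fraction up by repeated doubling
	 * until the denominators match, then fuzzily compare numerators.
	 * Denominators that don't differ by a power of two can't match.
	 */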
8111 	if (n > n2) {
8112 		while (n > n2) {
8113 			m2 <<= 1;
8114 			n2 <<= 1;
8115 		}
8116 	} else if (n < n2) {
8117 		while (n < n2) {
8118 			m <<= 1;
8119 			n <<= 1;
8120 		}
8121 	}
8122 
8123 	if (n != n2)
8124 		return false;
8125 
8126 	return intel_fuzzy_clock_check(m, m2);
8127 }
8128 
8129 static bool
8130 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
8131 		       const struct intel_link_m_n *m2_n2,
8132 		       bool exact)
8133 {
8134 	return m_n->tu == m2_n2->tu &&
8135 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
8136 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
8137 		intel_compare_m_n(m_n->link_m, m_n->link_n,
8138 				  m2_n2->link_m, m2_n2->link_n, exact);
8139 }
8140 
8141 static bool
8142 intel_compare_infoframe(const union hdmi_infoframe *a,
8143 			const union hdmi_infoframe *b)
8144 {
8145 	return memcmp(a, b, sizeof(*a)) == 0;
8146 }
8147 
8148 static bool
8149 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
8150 			 const struct drm_dp_vsc_sdp *b)
8151 {
8152 	return memcmp(a, b, sizeof(*a)) == 0;
8153 }
8154 
8155 static void
8156 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
8157 			       bool fastset, const char *name,
8158 			       const union hdmi_infoframe *a,
8159 			       const union hdmi_infoframe *b)
8160 {
8161 	if (fastset) {
8162 		if (!drm_debug_enabled(DRM_UT_KMS))
8163 			return;
8164 
8165 		drm_dbg_kms(&dev_priv->drm,
8166 			    "fastset mismatch in %s infoframe\n", name);
8167 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
8168 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
8169 		drm_dbg_kms(&dev_priv->drm, "found:\n");
8170 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
8171 	} else {
8172 		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
8173 		drm_err(&dev_priv->drm, "expected:\n");
8174 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
8175 		drm_err(&dev_priv->drm, "found:\n");
8176 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
8177 	}
8178 }
8179 
8180 static void
8181 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
8182 				bool fastset, const char *name,
8183 				const struct drm_dp_vsc_sdp *a,
8184 				const struct drm_dp_vsc_sdp *b)
8185 {
8186 	if (fastset) {
8187 		if (!drm_debug_enabled(DRM_UT_KMS))
8188 			return;
8189 
8190 		drm_dbg_kms(&dev_priv->drm,
8191 			    "fastset mismatch in %s dp sdp\n", name);
8192 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
8193 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
8194 		drm_dbg_kms(&dev_priv->drm, "found:\n");
8195 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
8196 	} else {
8197 		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
8198 		drm_err(&dev_priv->drm, "expected:\n");
8199 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
8200 		drm_err(&dev_priv->drm, "found:\n");
8201 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
8202 	}
8203 }
8204 
8205 static void __printf(4, 5)
8206 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
8207 		     const char *name, const char *format, ...)
8208 {
8209 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
8210 	struct va_format vaf;
8211 	va_list args;
8212 
8213 	va_start(args, format);
8214 	vaf.fmt = format;
8215 	vaf.va = &args;
8216 
8217 	if (fastset)
8218 		drm_dbg_kms(&i915->drm,
8219 			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
8220 			    crtc->base.base.id, crtc->base.name, name, &vaf);
8221 	else
8222 		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
8223 			crtc->base.base.id, crtc->base.name, name, &vaf);
8224 
8225 	va_end(args);
8226 }
8227 
8228 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
8229 {
8230 	if (dev_priv->params.fastboot != -1)
8231 		return dev_priv->params.fastboot;
8232 
8233 	/* Enable fastboot by default on Skylake and newer */
8234 	if (DISPLAY_VER(dev_priv) >= 9)
8235 		return true;
8236 
8237 	/* Enable fastboot by default on VLV and CHV */
8238 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8239 		return true;
8240 
8241 	/* Disabled by default on all others */
8242 	return false;
8243 }
8244 
8245 static bool
8246 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
8247 			  const struct intel_crtc_state *pipe_config,
8248 			  bool fastset)
8249 {
8250 	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
8251 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8252 	bool ret = true;
8253 	u32 bp_gamma = 0;
8254 	bool fixup_inherited = fastset &&
8255 		current_config->inherited && !pipe_config->inherited;
8256 
8257 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
8258 		drm_dbg_kms(&dev_priv->drm,
8259 			    "initial modeset and fastboot not set\n");
8260 		ret = false;
8261 	}
8262 
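/*
 * The PIPE_CONF_CHECK_* helpers compare one member of current_config and
 * pipe_config; on mismatch they log the difference (debug level for
 * fastset checks, error otherwise) and mark the whole comparison failed
 * rather than bailing out early.
 */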
8263 #define PIPE_CONF_CHECK_X(name) do { \
8264 	if (current_config->name != pipe_config->name) { \
8265 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8266 				     "(expected 0x%08x, found 0x%08x)", \
8267 				     current_config->name, \
8268 				     pipe_config->name); \
8269 		ret = false; \
8270 	} \
8271 } while (0)
8272 
8273 #define PIPE_CONF_CHECK_I(name) do { \
8274 	if (current_config->name != pipe_config->name) { \
8275 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8276 				     "(expected %i, found %i)", \
8277 				     current_config->name, \
8278 				     pipe_config->name); \
8279 		ret = false; \
8280 	} \
8281 } while (0)
8282 
8283 #define PIPE_CONF_CHECK_BOOL(name) do { \
8284 	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8286 				     "(expected %s, found %s)", \
8287 				     yesno(current_config->name), \
8288 				     yesno(pipe_config->name)); \
8289 		ret = false; \
8290 	} \
8291 } while (0)
8292 
8293 /*
8294  * Checks state where we only read out the enabling, but not the entire
8295  * state itself (like full infoframes or ELD for audio). These states
8296  * require a full modeset on bootup to fix up.
8297  */
8298 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8299 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8300 		PIPE_CONF_CHECK_BOOL(name); \
8301 	} else { \
8302 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8303 				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8304 				     yesno(current_config->name), \
8305 				     yesno(pipe_config->name)); \
8306 		ret = false; \
8307 	} \
8308 } while (0)
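/*
 * Example of the _INCOMPLETE case: on an inherited (BIOS) config we only
 * read out whether audio is enabled, not the full ELD/infoframe contents.
 * If has_audio is set on either side we cannot prove the rest of the audio
 * state matches, so the check reports a mismatch and forces a full modeset
 * rather than risking a wrong fastset.
 */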
8309 
8310 #define PIPE_CONF_CHECK_P(name) do { \
8311 	if (current_config->name != pipe_config->name) { \
8312 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8313 				     "(expected %p, found %p)", \
8314 				     current_config->name, \
8315 				     pipe_config->name); \
8316 		ret = false; \
8317 	} \
8318 } while (0)
8319 
8320 #define PIPE_CONF_CHECK_M_N(name) do { \
8321 	if (!intel_compare_link_m_n(&current_config->name, \
8322 				    &pipe_config->name,\
8323 				    !fastset)) { \
8324 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8325 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8326 				     "found tu %i, gmch %i/%i link %i/%i)", \
8327 				     current_config->name.tu, \
8328 				     current_config->name.gmch_m, \
8329 				     current_config->name.gmch_n, \
8330 				     current_config->name.link_m, \
8331 				     current_config->name.link_n, \
8332 				     pipe_config->name.tu, \
8333 				     pipe_config->name.gmch_m, \
8334 				     pipe_config->name.gmch_n, \
8335 				     pipe_config->name.link_m, \
8336 				     pipe_config->name.link_n); \
8337 		ret = false; \
8338 	} \
8339 } while (0)
8340 
/*
 * This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
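/*
 * Example: on BDW with DRRS the hw may currently be running at the low
 * refresh rate, so the single M/N register set read back can legitimately
 * match either the sw dp_m_n (high RR) or dp_m2_n2 (low RR) values; this
 * check accepts either alternative.
 */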
8346 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
8347 	if (!intel_compare_link_m_n(&current_config->name, \
8348 				    &pipe_config->name, !fastset) && \
8349 	    !intel_compare_link_m_n(&current_config->alt_name, \
8350 				    &pipe_config->name, !fastset)) { \
8351 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8352 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8353 				     "or tu %i gmch %i/%i link %i/%i, " \
8354 				     "found tu %i, gmch %i/%i link %i/%i)", \
8355 				     current_config->name.tu, \
8356 				     current_config->name.gmch_m, \
8357 				     current_config->name.gmch_n, \
8358 				     current_config->name.link_m, \
8359 				     current_config->name.link_n, \
8360 				     current_config->alt_name.tu, \
8361 				     current_config->alt_name.gmch_m, \
8362 				     current_config->alt_name.gmch_n, \
8363 				     current_config->alt_name.link_m, \
8364 				     current_config->alt_name.link_n, \
8365 				     pipe_config->name.tu, \
8366 				     pipe_config->name.gmch_m, \
8367 				     pipe_config->name.gmch_n, \
8368 				     pipe_config->name.link_m, \
8369 				     pipe_config->name.link_n); \
8370 		ret = false; \
8371 	} \
8372 } while (0)
8373 
8374 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8375 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
8376 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8377 				     "(%x) (expected %i, found %i)", \
8378 				     (mask), \
8379 				     current_config->name & (mask), \
8380 				     pipe_config->name & (mask)); \
8381 		ret = false; \
8382 	} \
8383 } while (0)
8384 
8385 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8386 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8387 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8388 				     "(expected %i, found %i)", \
8389 				     current_config->name, \
8390 				     pipe_config->name); \
8391 		ret = false; \
8392 	} \
8393 } while (0)
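/*
 * intel_fuzzy_clock_check() tolerates a small relative difference between
 * the clocks instead of demanding exact equality, since the computed clock
 * and the hw readout can disagree slightly due to PLL rounding. E.g. a
 * computed pixel clock of 148500 kHz should still match a readout of
 * 148350 kHz.
 */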
8394 
8395 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8396 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
8397 				     &pipe_config->infoframes.name)) { \
8398 		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8399 					       &current_config->infoframes.name, \
8400 					       &pipe_config->infoframes.name); \
8401 		ret = false; \
8402 	} \
8403 } while (0)
8404 
8405 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8406 	if (!current_config->has_psr && !pipe_config->has_psr && \
8407 	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
8408 				      &pipe_config->infoframes.name)) { \
8409 		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8410 						&current_config->infoframes.name, \
8411 						&pipe_config->infoframes.name); \
8412 		ret = false; \
8413 	} \
8414 } while (0)
8415 
8416 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8417 	if (current_config->name1 != pipe_config->name1) { \
8418 		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8419 				"(expected %i, found %i, won't compare lut values)", \
8420 				current_config->name1, \
8421 				pipe_config->name1); \
8422 		ret = false;\
8423 	} else { \
8424 		if (!intel_color_lut_equal(current_config->name2, \
8425 					pipe_config->name2, pipe_config->name1, \
8426 					bit_precision)) { \
8427 			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8428 					"hw_state doesn't match sw_state"); \
8429 			ret = false; \
8430 		} \
8431 	} \
8432 } while (0)
8433 
8434 #define PIPE_CONF_QUIRK(quirk) \
8435 	((current_config->quirks | pipe_config->quirks) & (quirk))
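/*
 * Note that PIPE_CONF_QUIRK() looks at the union of both states' quirks,
 * so a quirk set on either side (e.g. PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS
 * inherited from the hw readout) is enough to relax the corresponding
 * checks below.
 */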
8436 
8437 	PIPE_CONF_CHECK_I(cpu_transcoder);
8438 
8439 	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8440 	PIPE_CONF_CHECK_I(fdi_lanes);
8441 	PIPE_CONF_CHECK_M_N(fdi_m_n);
8442 
8443 	PIPE_CONF_CHECK_I(lane_count);
8444 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8445 
8446 	if (DISPLAY_VER(dev_priv) < 8) {
8447 		PIPE_CONF_CHECK_M_N(dp_m_n);
8448 
8449 		if (current_config->has_drrs)
8450 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
8451 	} else
8452 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8453 
8454 	PIPE_CONF_CHECK_X(output_types);
8455 
8456 	/* FIXME do the readout properly and get rid of this quirk */
8457 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8458 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8459 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8460 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8461 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8462 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8463 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8464 
8465 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8466 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8467 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8468 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8469 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8470 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8471 
8472 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8473 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8474 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8475 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8476 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8477 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8478 
8479 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8480 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8481 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8482 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8483 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8484 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8485 
8486 		PIPE_CONF_CHECK_I(pixel_multiplier);
8487 
8488 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8489 				      DRM_MODE_FLAG_INTERLACE);
8490 
8491 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8492 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8493 					      DRM_MODE_FLAG_PHSYNC);
8494 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8495 					      DRM_MODE_FLAG_NHSYNC);
8496 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8497 					      DRM_MODE_FLAG_PVSYNC);
8498 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8499 					      DRM_MODE_FLAG_NVSYNC);
8500 		}
8501 	}
8502 
8503 	PIPE_CONF_CHECK_I(output_format);
8504 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
8505 	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
8506 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8507 		PIPE_CONF_CHECK_BOOL(limited_color_range);
8508 
8509 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
8510 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
8511 	PIPE_CONF_CHECK_BOOL(has_infoframe);
8512 	/* FIXME do the readout properly and get rid of this quirk */
8513 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8514 		PIPE_CONF_CHECK_BOOL(fec_enable);
8515 
8516 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
8517 
8518 	PIPE_CONF_CHECK_X(gmch_pfit.control);
8519 	/* pfit ratios are autocomputed by the hw on gen4+ */
8520 	if (DISPLAY_VER(dev_priv) < 4)
8521 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
8522 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
8523 
8524 	/*
8525 	 * Changing the EDP transcoder input mux
8526 	 * (A_ONOFF vs. A_ON) requires a full modeset.
8527 	 */
8528 	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
8529 
8530 	if (!fastset) {
8531 		PIPE_CONF_CHECK_I(pipe_src_w);
8532 		PIPE_CONF_CHECK_I(pipe_src_h);
8533 
8534 		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
8535 		if (current_config->pch_pfit.enabled) {
8536 			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
8537 			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
8538 			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
8539 			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
8540 		}
8541 
8542 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
8543 		/* FIXME do the readout properly and get rid of this quirk */
8544 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8545 			PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
8546 
8547 		PIPE_CONF_CHECK_X(gamma_mode);
8548 		if (IS_CHERRYVIEW(dev_priv))
8549 			PIPE_CONF_CHECK_X(cgm_mode);
8550 		else
8551 			PIPE_CONF_CHECK_X(csc_mode);
8552 		PIPE_CONF_CHECK_BOOL(gamma_enable);
8553 		PIPE_CONF_CHECK_BOOL(csc_enable);
8554 
8555 		PIPE_CONF_CHECK_I(linetime);
8556 		PIPE_CONF_CHECK_I(ips_linetime);
8557 
8558 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
8559 		if (bp_gamma)
8560 			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
8561 	}
8562 
8563 	PIPE_CONF_CHECK_BOOL(double_wide);
8564 
8565 	PIPE_CONF_CHECK_P(shared_dpll);
8566 
8567 	/* FIXME do the readout properly and get rid of this quirk */
8568 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8569 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8570 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8571 		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8572 		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8573 		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
8574 		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
8575 		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
8576 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
8577 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
8578 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
8579 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
8580 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
8581 		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
8582 		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
8583 		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
8584 		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
8585 		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
8586 		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
8587 		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
8588 		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
8589 		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
8590 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
8591 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
8592 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
8593 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
8594 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
8595 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
8596 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
8597 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
8598 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
8599 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
8600 
8601 		PIPE_CONF_CHECK_X(dsi_pll.ctrl);
8602 		PIPE_CONF_CHECK_X(dsi_pll.div);
8603 
8604 		if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
8605 			PIPE_CONF_CHECK_I(pipe_bpp);
8606 
8607 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
8608 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
8609 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8610 
8611 		PIPE_CONF_CHECK_I(min_voltage_level);
8612 	}
8613 
8614 	PIPE_CONF_CHECK_X(infoframes.enable);
8615 	PIPE_CONF_CHECK_X(infoframes.gcp);
8616 	PIPE_CONF_CHECK_INFOFRAME(avi);
8617 	PIPE_CONF_CHECK_INFOFRAME(spd);
8618 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
8619 	PIPE_CONF_CHECK_INFOFRAME(drm);
8620 	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
8621 
8622 	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
8623 	PIPE_CONF_CHECK_I(master_transcoder);
8624 	PIPE_CONF_CHECK_BOOL(bigjoiner);
8625 	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
8626 	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
8627 
8628 	PIPE_CONF_CHECK_I(dsc.compression_enable);
8629 	PIPE_CONF_CHECK_I(dsc.dsc_split);
8630 	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
8631 
8632 	PIPE_CONF_CHECK_BOOL(splitter.enable);
8633 	PIPE_CONF_CHECK_I(splitter.link_count);
8634 	PIPE_CONF_CHECK_I(splitter.pixel_overlap);
8635 
8636 	PIPE_CONF_CHECK_I(mst_master_transcoder);
8637 
8638 	PIPE_CONF_CHECK_BOOL(vrr.enable);
8639 	PIPE_CONF_CHECK_I(vrr.vmin);
8640 	PIPE_CONF_CHECK_I(vrr.vmax);
8641 	PIPE_CONF_CHECK_I(vrr.flipline);
8642 	PIPE_CONF_CHECK_I(vrr.pipeline_full);
8643 
8644 	PIPE_CONF_CHECK_BOOL(has_psr);
8645 	PIPE_CONF_CHECK_BOOL(has_psr2);
8646 	PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
8647 	PIPE_CONF_CHECK_I(dc3co_exitline);
8648 
8649 #undef PIPE_CONF_CHECK_X
8650 #undef PIPE_CONF_CHECK_I
8651 #undef PIPE_CONF_CHECK_BOOL
8652 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8653 #undef PIPE_CONF_CHECK_P
8654 #undef PIPE_CONF_CHECK_FLAGS
8655 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
8656 #undef PIPE_CONF_CHECK_COLOR_LUT
8657 #undef PIPE_CONF_QUIRK
8658 
8659 	return ret;
8660 }
8661 
8662 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
8663 					   const struct intel_crtc_state *pipe_config)
8664 {
8665 	if (pipe_config->has_pch_encoder) {
8666 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8667 							    &pipe_config->fdi_m_n);
8668 		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
8669 
8670 		/*
8671 		 * FDI already provided one idea for the dotclock.
8672 		 * Yell if the encoder disagrees.
8673 		 */
8674 		drm_WARN(&dev_priv->drm,
8675 			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
8676 			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
8677 			 fdi_dotclock, dotclock);
8678 	}
8679 }
8680 
8681 static void verify_wm_state(struct intel_crtc *crtc,
8682 			    struct intel_crtc_state *new_crtc_state)
8683 {
8684 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8685 	struct skl_hw_state {
8686 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
8687 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
8688 		struct skl_pipe_wm wm;
8689 	} *hw;
8690 	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
8691 	int level, max_level = ilk_wm_max_level(dev_priv);
8692 	struct intel_plane *plane;
8693 	u8 hw_enabled_slices;
8694 
8695 	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
8696 		return;
8697 
8698 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
8699 	if (!hw)
8700 		return;
8701 
8702 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
8703 
8704 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
8705 
8706 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
8707 
8708 	if (DISPLAY_VER(dev_priv) >= 11 &&
8709 	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
8710 		drm_err(&dev_priv->drm,
8711 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
8712 			dev_priv->dbuf.enabled_slices,
8713 			hw_enabled_slices);
8714 
8715 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
8716 		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
8717 		const struct skl_wm_level *hw_wm_level, *sw_wm_level;
8718 
8719 		/* Watermarks */
8720 		for (level = 0; level <= max_level; level++) {
8721 			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
8722 			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
8723 
8724 			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
8725 				continue;
8726 
8727 			drm_err(&dev_priv->drm,
8728 				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8729 				plane->base.base.id, plane->base.name, level,
8730 				sw_wm_level->enable,
8731 				sw_wm_level->blocks,
8732 				sw_wm_level->lines,
8733 				hw_wm_level->enable,
8734 				hw_wm_level->blocks,
8735 				hw_wm_level->lines);
8736 		}
8737 
8738 		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
8739 		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
8740 
8741 		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8742 			drm_err(&dev_priv->drm,
8743 				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8744 				plane->base.base.id, plane->base.name,
8745 				sw_wm_level->enable,
8746 				sw_wm_level->blocks,
8747 				sw_wm_level->lines,
8748 				hw_wm_level->enable,
8749 				hw_wm_level->blocks,
8750 				hw_wm_level->lines);
8751 		}
8752 
8753 		/* DDB */
8754 		hw_ddb_entry = &hw->ddb_y[plane->id];
8755 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
8756 
8757 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
8758 			drm_err(&dev_priv->drm,
8759 				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
8760 				plane->base.base.id, plane->base.name,
8761 				sw_ddb_entry->start, sw_ddb_entry->end,
8762 				hw_ddb_entry->start, hw_ddb_entry->end);
8763 		}
8764 	}
8765 
8766 	kfree(hw);
8767 }
8768 
8769 static void
8770 verify_connector_state(struct intel_atomic_state *state,
8771 		       struct intel_crtc *crtc)
8772 {
8773 	struct drm_connector *connector;
8774 	struct drm_connector_state *new_conn_state;
8775 	int i;
8776 
8777 	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
8778 		struct drm_encoder *encoder = connector->encoder;
8779 		struct intel_crtc_state *crtc_state = NULL;
8780 
8781 		if (new_conn_state->crtc != &crtc->base)
8782 			continue;
8783 
8784 		if (crtc)
8785 			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
8786 
8787 		intel_connector_verify_state(crtc_state, new_conn_state);
8788 
8789 		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
8790 		     "connector's atomic encoder doesn't match legacy encoder\n");
8791 	}
8792 }
8793 
8794 static void
8795 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
8796 {
8797 	struct intel_encoder *encoder;
8798 	struct drm_connector *connector;
8799 	struct drm_connector_state *old_conn_state, *new_conn_state;
8800 	int i;
8801 
8802 	for_each_intel_encoder(&dev_priv->drm, encoder) {
8803 		bool enabled = false, found = false;
8804 		enum pipe pipe;
8805 
8806 		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
8807 			    encoder->base.base.id,
8808 			    encoder->base.name);
8809 
8810 		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
8811 						   new_conn_state, i) {
8812 			if (old_conn_state->best_encoder == &encoder->base)
8813 				found = true;
8814 
8815 			if (new_conn_state->best_encoder != &encoder->base)
8816 				continue;
8817 			found = enabled = true;
8818 
8819 			I915_STATE_WARN(new_conn_state->crtc !=
8820 					encoder->base.crtc,
8821 			     "connector's crtc doesn't match encoder crtc\n");
8822 		}
8823 
8824 		if (!found)
8825 			continue;
8826 
8827 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
8828 		     "encoder's enabled state mismatch "
8829 		     "(expected %i, found %i)\n",
8830 		     !!encoder->base.crtc, enabled);
8831 
8832 		if (!encoder->base.crtc) {
8833 			bool active;
8834 
8835 			active = encoder->get_hw_state(encoder, &pipe);
8836 			I915_STATE_WARN(active,
8837 			     "encoder detached but still enabled on pipe %c.\n",
8838 			     pipe_name(pipe));
8839 		}
8840 	}
8841 }
8842 
8843 static void
8844 verify_crtc_state(struct intel_crtc *crtc,
8845 		  struct intel_crtc_state *old_crtc_state,
8846 		  struct intel_crtc_state *new_crtc_state)
8847 {
8848 	struct drm_device *dev = crtc->base.dev;
8849 	struct drm_i915_private *dev_priv = to_i915(dev);
8850 	struct intel_encoder *encoder;
8851 	struct intel_crtc_state *pipe_config = old_crtc_state;
8852 	struct drm_atomic_state *state = old_crtc_state->uapi.state;
8853 	struct intel_crtc *master = crtc;
8854 
8855 	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
8856 	intel_crtc_free_hw_state(old_crtc_state);
8857 	intel_crtc_state_reset(old_crtc_state, crtc);
8858 	old_crtc_state->uapi.state = state;
8859 
8860 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
8861 		    crtc->base.name);
8862 
8863 	pipe_config->hw.enable = new_crtc_state->hw.enable;
8864 
8865 	intel_crtc_get_pipe_config(pipe_config);
8866 
8867 	/* we keep both pipes enabled on 830 */
8868 	if (IS_I830(dev_priv) && pipe_config->hw.active)
8869 		pipe_config->hw.active = new_crtc_state->hw.active;
8870 
8871 	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
8872 			"crtc active state doesn't match with hw state "
8873 			"(expected %i, found %i)\n",
8874 			new_crtc_state->hw.active, pipe_config->hw.active);
8875 
8876 	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
8877 			"transitional active state does not match atomic hw state "
8878 			"(expected %i, found %i)\n",
8879 			new_crtc_state->hw.active, crtc->active);
8880 
8881 	if (new_crtc_state->bigjoiner_slave)
8882 		master = new_crtc_state->bigjoiner_linked_crtc;
8883 
8884 	for_each_encoder_on_crtc(dev, &master->base, encoder) {
8885 		enum pipe pipe;
8886 		bool active;
8887 
8888 		active = encoder->get_hw_state(encoder, &pipe);
8889 		I915_STATE_WARN(active != new_crtc_state->hw.active,
8890 				"[ENCODER:%i] active %i with crtc active %i\n",
8891 				encoder->base.base.id, active,
8892 				new_crtc_state->hw.active);
8893 
8894 		I915_STATE_WARN(active && master->pipe != pipe,
8895 				"Encoder connected to wrong pipe %c\n",
8896 				pipe_name(pipe));
8897 
8898 		if (active)
8899 			intel_encoder_get_config(encoder, pipe_config);
8900 	}
8901 
8902 	if (!new_crtc_state->hw.active)
8903 		return;
8904 
8905 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
8906 
8907 	if (!intel_pipe_config_compare(new_crtc_state,
8908 				       pipe_config, false)) {
8909 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
8910 		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
8911 		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
8912 	}
8913 }
8914 
8915 static void
8916 intel_verify_planes(struct intel_atomic_state *state)
8917 {
8918 	struct intel_plane *plane;
8919 	const struct intel_plane_state *plane_state;
8920 	int i;
8921 
8922 	for_each_new_intel_plane_in_state(state, plane,
8923 					  plane_state, i)
8924 		assert_plane(plane, plane_state->planar_slave ||
8925 			     plane_state->uapi.visible);
8926 }
8927 
8928 static void
8929 verify_single_dpll_state(struct drm_i915_private *dev_priv,
8930 			 struct intel_shared_dpll *pll,
8931 			 struct intel_crtc *crtc,
8932 			 struct intel_crtc_state *new_crtc_state)
8933 {
8934 	struct intel_dpll_hw_state dpll_hw_state;
8935 	u8 pipe_mask;
8936 	bool active;
8937 
8938 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
8939 
8940 	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
8941 
8942 	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
8943 
8944 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
8945 		I915_STATE_WARN(!pll->on && pll->active_mask,
8946 		     "pll in active use but not on in sw tracking\n");
8947 		I915_STATE_WARN(pll->on && !pll->active_mask,
8948 		     "pll is on but not used by any active pipe\n");
8949 		I915_STATE_WARN(pll->on != active,
8950 		     "pll on state mismatch (expected %i, found %i)\n",
8951 		     pll->on, active);
8952 	}
8953 
8954 	if (!crtc) {
8955 		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
8956 				"more active pll users than references: 0x%x vs 0x%x\n",
8957 				pll->active_mask, pll->state.pipe_mask);
8958 
8959 		return;
8960 	}
8961 
8962 	pipe_mask = BIT(crtc->pipe);
8963 
8964 	if (new_crtc_state->hw.active)
8965 		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
8966 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
8967 				pipe_name(crtc->pipe), pll->active_mask);
8968 	else
8969 		I915_STATE_WARN(pll->active_mask & pipe_mask,
8970 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
8971 				pipe_name(crtc->pipe), pll->active_mask);
8972 
8973 	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
8974 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
8975 			pipe_mask, pll->state.pipe_mask);
8976 
8977 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
8978 					  &dpll_hw_state,
8979 					  sizeof(dpll_hw_state)),
8980 			"pll hw state mismatch\n");
8981 }
8982 
8983 static void
8984 verify_shared_dpll_state(struct intel_crtc *crtc,
8985 			 struct intel_crtc_state *old_crtc_state,
8986 			 struct intel_crtc_state *new_crtc_state)
8987 {
8988 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8989 
8990 	if (new_crtc_state->shared_dpll)
8991 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
8992 
8993 	if (old_crtc_state->shared_dpll &&
8994 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
8995 		u8 pipe_mask = BIT(crtc->pipe);
8996 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
8997 
8998 		I915_STATE_WARN(pll->active_mask & pipe_mask,
8999 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
9000 				pipe_name(crtc->pipe), pll->active_mask);
9001 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
9002 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
9003 				pipe_name(crtc->pipe), pll->state.pipe_mask);
9004 	}
9005 }
9006 
9007 static void
9008 intel_modeset_verify_crtc(struct intel_crtc *crtc,
9009 			  struct intel_atomic_state *state,
9010 			  struct intel_crtc_state *old_crtc_state,
9011 			  struct intel_crtc_state *new_crtc_state)
9012 {
9013 	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
9014 		return;
9015 
9016 	verify_wm_state(crtc, new_crtc_state);
9017 	verify_connector_state(state, crtc);
9018 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
9019 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
9020 }
9021 
9022 static void
9023 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
9024 {
9025 	int i;
9026 
9027 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
9028 		verify_single_dpll_state(dev_priv,
9029 					 &dev_priv->dpll.shared_dplls[i],
9030 					 NULL, NULL);
9031 }
9032 
9033 static void
9034 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
9035 			      struct intel_atomic_state *state)
9036 {
9037 	verify_encoder_state(dev_priv, state);
9038 	verify_connector_state(state, NULL);
9039 	verify_disabled_dpll_state(dev_priv);
9040 }
9041 
9042 int intel_modeset_all_pipes(struct intel_atomic_state *state)
9043 {
9044 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9045 	struct intel_crtc *crtc;
9046 
9047 	/*
9048 	 * Add all pipes to the state, and force
9049 	 * a modeset on all the active ones.
9050 	 */
9051 	for_each_intel_crtc(&dev_priv->drm, crtc) {
9052 		struct intel_crtc_state *crtc_state;
9053 		int ret;
9054 
9055 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
9056 		if (IS_ERR(crtc_state))
9057 			return PTR_ERR(crtc_state);
9058 
9059 		if (!crtc_state->hw.active ||
9060 		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
9061 			continue;
9062 
9063 		crtc_state->uapi.mode_changed = true;
9064 
9065 		ret = drm_atomic_add_affected_connectors(&state->base,
9066 							 &crtc->base);
9067 		if (ret)
9068 			return ret;
9069 
9070 		ret = intel_atomic_add_affected_planes(state, crtc);
9071 		if (ret)
9072 			return ret;
9073 
9074 		crtc_state->update_planes |= crtc_state->active_planes;
9075 	}
9076 
9077 	return 0;
9078 }
9079 
9080 static void
9081 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
9082 {
9083 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9084 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9085 	struct drm_display_mode adjusted_mode =
9086 		crtc_state->hw.adjusted_mode;
9087 
9088 	if (crtc_state->vrr.enable) {
9089 		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
9090 		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
9091 		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
9092 		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
9093 	}
9094 
9095 	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
9096 
9097 	crtc->mode_flags = crtc_state->mode_flags;
9098 
9099 	/*
9100 	 * The scanline counter increments at the leading edge of hsync.
9101 	 *
9102 	 * On most platforms it starts counting from vtotal-1 on the
9103 	 * first active line. That means the scanline counter value is
9104 	 * always one less than what we would expect. Ie. just after
9105 	 * start of vblank, which also occurs at start of hsync (on the
9106 	 * last active line), the scanline counter will read vblank_start-1.
9107 	 *
9108 	 * On gen2 the scanline counter starts counting from 1 instead
9109 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
9110 	 * to keep the value positive), instead of adding one.
9111 	 *
9112 	 * On HSW+ the behaviour of the scanline counter depends on the output
9113 	 * type. For DP ports it behaves like most other platforms, but on HDMI
9114 	 * there's an extra 1 line difference. So we need to add two instead of
9115 	 * one to the value.
9116 	 *
9117 	 * On VLV/CHV DSI the scanline counter would appear to increment
9118 	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
9119 	 * that means we can't tell whether we're in vblank or not while
9120 	 * we're on that particular line. We must still set scanline_offset
9121 	 * to 1 so that the vblank timestamps come out correct when we query
9122 	 * the scanline counter from within the vblank interrupt handler.
9123 	 * However if queried just before the start of vblank we'll get an
9124 	 * answer that's slightly in the future.
9125 	 */
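	/*
	 * Worked example (illustrative; the scanline readout helpers are
	 * assumed to apply (raw_counter + scanline_offset) % vtotal): with
	 * vtotal = 806 on a typical platform scanline_offset = 1, so a raw
	 * readout of vblank_start - 1 taken just after the start of vblank
	 * is corrected to vblank_start. On gen2 the counter starts from 1
	 * instead, so scanline_offset = vtotal - 1 = 805 and a raw readout
	 * of 1 on the first active line wraps to (1 + 805) % 806 = 0.
	 */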
9126 	if (DISPLAY_VER(dev_priv) == 2) {
9127 		int vtotal;
9128 
9129 		vtotal = adjusted_mode.crtc_vtotal;
9130 		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9131 			vtotal /= 2;
9132 
9133 		crtc->scanline_offset = vtotal - 1;
9134 	} else if (HAS_DDI(dev_priv) &&
9135 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
9136 		crtc->scanline_offset = 2;
9137 	} else {
9138 		crtc->scanline_offset = 1;
9139 	}
9140 }
9141 
9142 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
9143 {
9144 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9145 	struct intel_crtc_state *new_crtc_state;
9146 	struct intel_crtc *crtc;
9147 	int i;
9148 
9149 	if (!dev_priv->display.crtc_compute_clock)
9150 		return;
9151 
9152 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9153 		if (!intel_crtc_needs_modeset(new_crtc_state))
9154 			continue;
9155 
9156 		intel_release_shared_dplls(state, crtc);
9157 	}
9158 }
9159 
9160 /*
9161  * This implements the workaround described in the "notes" section of the mode
9162  * set sequence documentation. When going from no pipes or single pipe to
9163  * multiple pipes, and planes are enabled after the pipe, we need to wait at
9164  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
9165  */
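/*
 * Concrete scenario (illustrative): going from pipe A active to A + B
 * active, pipe B is the only crtc being enabled, so B's state gets
 * hsw_workaround_pipe = PIPE_A and plane enabling on B waits for vblanks
 * on A. Going from no pipes to A + B enabled in one go, the second crtc
 * found in the state gets its hsw_workaround_pipe pointed at the first.
 */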
9166 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
9167 {
9168 	struct intel_crtc_state *crtc_state;
9169 	struct intel_crtc *crtc;
9170 	struct intel_crtc_state *first_crtc_state = NULL;
9171 	struct intel_crtc_state *other_crtc_state = NULL;
9172 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
9173 	int i;
9174 
	/* look at all crtcs that are going to be enabled during the modeset */
9176 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9177 		if (!crtc_state->hw.active ||
9178 		    !intel_crtc_needs_modeset(crtc_state))
9179 			continue;
9180 
9181 		if (first_crtc_state) {
9182 			other_crtc_state = crtc_state;
9183 			break;
9184 		} else {
9185 			first_crtc_state = crtc_state;
9186 			first_pipe = crtc->pipe;
9187 		}
9188 	}
9189 
9190 	/* No workaround needed? */
9191 	if (!first_crtc_state)
9192 		return 0;
9193 
	/* w/a possibly needed, check how many crtcs are already enabled. */
9195 	for_each_intel_crtc(state->base.dev, crtc) {
9196 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
9197 		if (IS_ERR(crtc_state))
9198 			return PTR_ERR(crtc_state);
9199 
9200 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
9201 
9202 		if (!crtc_state->hw.active ||
9203 		    intel_crtc_needs_modeset(crtc_state))
9204 			continue;
9205 
9206 		/* 2 or more enabled crtcs means no need for w/a */
9207 		if (enabled_pipe != INVALID_PIPE)
9208 			return 0;
9209 
9210 		enabled_pipe = crtc->pipe;
9211 	}
9212 
9213 	if (enabled_pipe != INVALID_PIPE)
9214 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
9215 	else if (other_crtc_state)
9216 		other_crtc_state->hsw_workaround_pipe = first_pipe;
9217 
9218 	return 0;
9219 }
9220 
9221 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
9222 			   u8 active_pipes)
9223 {
9224 	const struct intel_crtc_state *crtc_state;
9225 	struct intel_crtc *crtc;
9226 	int i;
9227 
9228 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9229 		if (crtc_state->hw.active)
9230 			active_pipes |= BIT(crtc->pipe);
9231 		else
9232 			active_pipes &= ~BIT(crtc->pipe);
9233 	}
9234 
9235 	return active_pipes;
9236 }
9237 
9238 static int intel_modeset_checks(struct intel_atomic_state *state)
9239 {
9240 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9241 
9242 	state->modeset = true;
9243 
9244 	if (IS_HASWELL(dev_priv))
9245 		return hsw_mode_set_planes_workaround(state);
9246 
9247 	return 0;
9248 }
9249 
9250 /*
9251  * Handle calculation of various watermark data at the end of the atomic check
9252  * phase.  The code here should be run after the per-crtc and per-plane 'check'
9253  * handlers to ensure that all derived state has been updated.
9254  */
9255 static int calc_watermark_data(struct intel_atomic_state *state)
9256 {
9257 	struct drm_device *dev = state->base.dev;
9258 	struct drm_i915_private *dev_priv = to_i915(dev);
9259 
9260 	/* Is there platform-specific watermark information to calculate? */
9261 	if (dev_priv->display.compute_global_watermarks)
9262 		return dev_priv->display.compute_global_watermarks(state);
9263 
9264 	return 0;
9265 }
9266 
9267 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
9268 				     struct intel_crtc_state *new_crtc_state)
9269 {
9270 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
9271 		return;
9272 
9273 	new_crtc_state->uapi.mode_changed = false;
9274 	new_crtc_state->update_pipe = true;
9275 }
9276 
9277 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
9278 				    struct intel_crtc_state *new_crtc_state)
9279 {
9280 	/*
9281 	 * If we're not doing the full modeset we want to
9282 	 * keep the current M/N values as they may be
9283 	 * sufficiently different to the computed values
9284 	 * to cause problems.
9285 	 *
9286 	 * FIXME: should really copy more fuzzy state here
9287 	 */
9288 	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
9289 	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
9290 	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
9291 	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
9292 }
9293 
9294 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
9295 					  struct intel_crtc *crtc,
9296 					  u8 plane_ids_mask)
9297 {
9298 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9299 	struct intel_plane *plane;
9300 
9301 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
9302 		struct intel_plane_state *plane_state;
9303 
9304 		if ((plane_ids_mask & BIT(plane->id)) == 0)
9305 			continue;
9306 
9307 		plane_state = intel_atomic_get_plane_state(state, plane);
9308 		if (IS_ERR(plane_state))
9309 			return PTR_ERR(plane_state);
9310 	}
9311 
9312 	return 0;
9313 }
9314 
9315 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
9316 				     struct intel_crtc *crtc)
9317 {
9318 	const struct intel_crtc_state *old_crtc_state =
9319 		intel_atomic_get_old_crtc_state(state, crtc);
9320 	const struct intel_crtc_state *new_crtc_state =
9321 		intel_atomic_get_new_crtc_state(state, crtc);
9322 
9323 	return intel_crtc_add_planes_to_state(state, crtc,
9324 					      old_crtc_state->enabled_planes |
9325 					      new_crtc_state->enabled_planes);
9326 }
9327 
9328 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
9329 {
9330 	/* See {hsw,vlv,ivb}_plane_ratio() */
9331 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
9332 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9333 		IS_IVYBRIDGE(dev_priv);
9334 }
9335 
9336 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
9337 					   struct intel_crtc *crtc,
9338 					   struct intel_crtc *other)
9339 {
9340 	const struct intel_plane_state *plane_state;
9341 	struct intel_plane *plane;
9342 	u8 plane_ids = 0;
9343 	int i;
9344 
9345 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9346 		if (plane->pipe == crtc->pipe)
9347 			plane_ids |= BIT(plane->id);
9348 	}
9349 
9350 	return intel_crtc_add_planes_to_state(state, other, plane_ids);
9351 }
9352 
9353 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9354 {
9355 	const struct intel_crtc_state *crtc_state;
9356 	struct intel_crtc *crtc;
9357 	int i;
9358 
9359 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9360 		int ret;
9361 
9362 		if (!crtc_state->bigjoiner)
9363 			continue;
9364 
9365 		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9366 						      crtc_state->bigjoiner_linked_crtc);
9367 		if (ret)
9368 			return ret;
9369 	}
9370 
9371 	return 0;
9372 }
9373 
9374 static int intel_atomic_check_planes(struct intel_atomic_state *state)
9375 {
9376 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9377 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9378 	struct intel_plane_state *plane_state;
9379 	struct intel_plane *plane;
9380 	struct intel_crtc *crtc;
9381 	int i, ret;
9382 
9383 	ret = icl_add_linked_planes(state);
9384 	if (ret)
9385 		return ret;
9386 
9387 	ret = intel_bigjoiner_add_affected_planes(state);
9388 	if (ret)
9389 		return ret;
9390 
9391 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9392 		ret = intel_plane_atomic_check(state, plane);
9393 		if (ret) {
9394 			drm_dbg_atomic(&dev_priv->drm,
9395 				       "[PLANE:%d:%s] atomic driver check failed\n",
9396 				       plane->base.base.id, plane->base.name);
9397 			return ret;
9398 		}
9399 	}
9400 
9401 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9402 					    new_crtc_state, i) {
9403 		u8 old_active_planes, new_active_planes;
9404 
9405 		ret = icl_check_nv12_planes(new_crtc_state);
9406 		if (ret)
9407 			return ret;
9408 
9409 		/*
9410 		 * On some platforms the number of active planes affects
9411 		 * the planes' minimum cdclk calculation. Add such planes
9412 		 * to the state before we compute the minimum cdclk.
9413 		 */
9414 		if (!active_planes_affects_min_cdclk(dev_priv))
9415 			continue;
9416 
9417 		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9418 		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9419 
9420 		if (hweight8(old_active_planes) == hweight8(new_active_planes))
9421 			continue;
9422 
9423 		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
9424 		if (ret)
9425 			return ret;
9426 	}
9427 
9428 	return 0;
9429 }
9430 
9431 static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
9432 				    bool *need_cdclk_calc)
9433 {
9434 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9435 	const struct intel_cdclk_state *old_cdclk_state;
9436 	const struct intel_cdclk_state *new_cdclk_state;
9437 	struct intel_plane_state *plane_state;
9438 	struct intel_bw_state *new_bw_state;
9439 	struct intel_plane *plane;
9440 	int min_cdclk = 0;
9441 	enum pipe pipe;
9442 	int ret;
9443 	int i;
9444 	/*
9445 	 * active_planes bitmask has been updated, and potentially
9446 	 * affected planes are part of the state. We can now
9447 	 * compute the minimum cdclk for each plane.
9448 	 */
9449 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9450 		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
9451 		if (ret)
9452 			return ret;
9453 	}
9454 
9455 	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
9456 	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
9457 
9458 	if (new_cdclk_state &&
9459 	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
9460 		*need_cdclk_calc = true;
9461 
9462 	ret = dev_priv->display.bw_calc_min_cdclk(state);
9463 	if (ret)
9464 		return ret;
9465 
9466 	new_bw_state = intel_atomic_get_new_bw_state(state);
9467 
9468 	if (!new_cdclk_state || !new_bw_state)
9469 		return 0;
9470 
9471 	for_each_pipe(dev_priv, pipe) {
9472 		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
9473 
		/*
		 * Currently we only flag a cdclk recomputation if the
		 * bandwidth-derived minimum cdclk needs to increase.
		 */
9477 		if (new_bw_state->min_cdclk > min_cdclk)
9478 			*need_cdclk_calc = true;
9479 	}
9480 
9481 	return 0;
9482 }
9483 
9484 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9485 {
9486 	struct intel_crtc_state *crtc_state;
9487 	struct intel_crtc *crtc;
9488 	int i;
9489 
9490 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9491 		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9492 		int ret;
9493 
9494 		ret = intel_crtc_atomic_check(state, crtc);
9495 		if (ret) {
9496 			drm_dbg_atomic(&i915->drm,
9497 				       "[CRTC:%d:%s] atomic driver check failed\n",
9498 				       crtc->base.base.id, crtc->base.name);
9499 			return ret;
9500 		}
9501 	}
9502 
9503 	return 0;
9504 }
9505 
9506 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9507 					       u8 transcoders)
9508 {
9509 	const struct intel_crtc_state *new_crtc_state;
9510 	struct intel_crtc *crtc;
9511 	int i;
9512 
9513 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9514 		if (new_crtc_state->hw.enable &&
9515 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9516 		    intel_crtc_needs_modeset(new_crtc_state))
9517 			return true;
9518 	}
9519 
9520 	return false;
9521 }
9522 
9523 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
9524 					struct intel_crtc *crtc,
9525 					struct intel_crtc_state *old_crtc_state,
9526 					struct intel_crtc_state *new_crtc_state)
9527 {
9528 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9529 	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
9530 	struct intel_crtc *slave, *master;
9531 
	/* slave being enabled, is the master still claiming this crtc? */
9533 	if (old_crtc_state->bigjoiner_slave) {
9534 		slave = crtc;
9535 		master = old_crtc_state->bigjoiner_linked_crtc;
9536 		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
9537 		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
9538 			goto claimed;
9539 	}
9540 
9541 	if (!new_crtc_state->bigjoiner)
9542 		return 0;
9543 
9544 	if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, but it doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
9548 		return -EINVAL;
9549 	}
9550 
9551 	slave = new_crtc_state->bigjoiner_linked_crtc =
9552 		intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
9553 	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
9554 	master = crtc;
9555 	if (IS_ERR(slave_crtc_state))
9556 		return PTR_ERR(slave_crtc_state);
9557 
9558 	/* master being enabled, slave was already configured? */
9559 	if (slave_crtc_state->uapi.enable)
9560 		goto claimed;
9561 
9562 	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
9563 		      slave->base.base.id, slave->base.name);
9564 
9565 	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
9566 
9567 claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] is claiming this CRTC for bigjoiner.\n",
9570 		      slave->base.base.id, slave->base.name,
9571 		      master->base.base.id, master->base.name);
9572 	return -EINVAL;
9573 }
9574 
9575 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
9576 				 struct intel_crtc_state *master_crtc_state)
9577 {
9578 	struct intel_crtc_state *slave_crtc_state =
9579 		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
9580 
9581 	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
9582 	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
9583 	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
9584 	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
9585 }
9586 
9587 /**
9588  * DOC: asynchronous flip implementation
9589  *
9590  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
9591  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
 * Correspondingly, support is currently added for the primary plane only.
9593  *
9594  * Async flip can only change the plane surface address, so anything else
9595  * changing is rejected from the intel_atomic_check_async() function.
9596  * Once this check is cleared, flip done interrupt is enabled using
9597  * the intel_crtc_enable_flip_done() function.
9598  *
 * As soon as the surface address register is written, the flip done
 * interrupt is generated and the requested events are sent to userspace in
 * the interrupt handler itself. The timestamp and sequence sent during the
 * flip done event correspond to the last vblank and have no relation to the
 * actual time when the flip done event was sent.
9604  */
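/*
 * For reference, a userspace async flip request via libdrm would look
 * roughly like this (fd, crtc_id, fb_id and user_data are placeholders;
 * error handling omitted):
 *
 *	ret = drmModePageFlip(fd, crtc_id, fb_id,
 *			      DRM_MODE_PAGE_FLIP_EVENT |
 *			      DRM_MODE_PAGE_FLIP_ASYNC,
 *			      user_data);
 *
 * The resulting DRM_EVENT_FLIP_COMPLETE event arrives as soon as the
 * surface address write completes, with the timestamp caveats described
 * above.
 */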
9605 static int intel_atomic_check_async(struct intel_atomic_state *state)
9606 {
9607 	struct drm_i915_private *i915 = to_i915(state->base.dev);
9608 	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9609 	const struct intel_plane_state *new_plane_state, *old_plane_state;
9610 	struct intel_crtc *crtc;
9611 	struct intel_plane *plane;
9612 	int i;
9613 
9614 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9615 					    new_crtc_state, i) {
9616 		if (intel_crtc_needs_modeset(new_crtc_state)) {
			drm_dbg_kms(&i915->drm, "Modeset required, async flip not supported\n");
9618 			return -EINVAL;
9619 		}
9620 
9621 		if (!new_crtc_state->hw.active) {
9622 			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
9623 			return -EINVAL;
9624 		}
9625 		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
9626 			drm_dbg_kms(&i915->drm,
9627 				    "Active planes cannot be changed during async flip\n");
9628 			return -EINVAL;
9629 		}
9630 	}
9631 
9632 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
9633 					     new_plane_state, i) {
		/*
		 * TODO: Async flip is only supported through the page flip
		 * IOCTL as of now. So support is currently added for the
		 * primary plane only. Support for other planes, on platforms
		 * that support it (vlv/chv and icl+), should be added when
		 * async flip is enabled in the atomic IOCTL path.
		 */
9641 		if (!plane->async_flip)
9642 			return -EINVAL;
9643 
9644 		/*
9645 		 * FIXME: This check is kept generic for all platforms.
9646 		 * Need to verify this for all gen9 and gen10 platforms to enable
9647 		 * this selectively if required.
9648 		 */
9649 		switch (new_plane_state->hw.fb->modifier) {
9650 		case I915_FORMAT_MOD_X_TILED:
9651 		case I915_FORMAT_MOD_Y_TILED:
9652 		case I915_FORMAT_MOD_Yf_TILED:
9653 			break;
9654 		default:
9655 			drm_dbg_kms(&i915->drm,
9656 				    "Linear memory/CCS does not support async flips\n");
9657 			return -EINVAL;
9658 		}
9659 
9660 		if (old_plane_state->view.color_plane[0].stride !=
9661 		    new_plane_state->view.color_plane[0].stride) {
9662 			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
9663 			return -EINVAL;
9664 		}
9665 
9666 		if (old_plane_state->hw.fb->modifier !=
9667 		    new_plane_state->hw.fb->modifier) {
9668 			drm_dbg_kms(&i915->drm,
9669 				    "Framebuffer modifiers cannot be changed in async flip\n");
9670 			return -EINVAL;
9671 		}
9672 
9673 		if (old_plane_state->hw.fb->format !=
9674 		    new_plane_state->hw.fb->format) {
9675 			drm_dbg_kms(&i915->drm,
9676 				    "Framebuffer format cannot be changed in async flip\n");
9677 			return -EINVAL;
9678 		}
9679 
9680 		if (old_plane_state->hw.rotation !=
9681 		    new_plane_state->hw.rotation) {
9682 			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
9683 			return -EINVAL;
9684 		}
9685 
9686 		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
9687 		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
9688 			drm_dbg_kms(&i915->drm,
9689 				    "Plane size/co-ordinates cannot be changed in async flip\n");
9690 			return -EINVAL;
9691 		}
9692 
9693 		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
9694 			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
9695 			return -EINVAL;
9696 		}
9697 
9698 		if (old_plane_state->hw.pixel_blend_mode !=
9699 		    new_plane_state->hw.pixel_blend_mode) {
9700 			drm_dbg_kms(&i915->drm,
9701 				    "Pixel blend mode cannot be changed in async flip\n");
9702 			return -EINVAL;
9703 		}
9704 
9705 		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
9706 			drm_dbg_kms(&i915->drm,
9707 				    "Color encoding cannot be changed in async flip\n");
9708 			return -EINVAL;
9709 		}
9710 
9711 		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
9712 			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
9713 			return -EINVAL;
9714 		}
9715 	}
9716 
9717 	return 0;
9718 }
9719 
9720 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
9721 {
9722 	struct intel_crtc_state *crtc_state;
9723 	struct intel_crtc *crtc;
9724 	int i;
9725 
9726 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9727 		struct intel_crtc_state *linked_crtc_state;
9728 		struct intel_crtc *linked_crtc;
9729 		int ret;
9730 
9731 		if (!crtc_state->bigjoiner)
9732 			continue;
9733 
9734 		linked_crtc = crtc_state->bigjoiner_linked_crtc;
9735 		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
9736 		if (IS_ERR(linked_crtc_state))
9737 			return PTR_ERR(linked_crtc_state);
9738 
9739 		if (!intel_crtc_needs_modeset(crtc_state))
9740 			continue;
9741 
9742 		linked_crtc_state->uapi.mode_changed = true;
9743 
9744 		ret = drm_atomic_add_affected_connectors(&state->base,
9745 							 &linked_crtc->base);
9746 		if (ret)
9747 			return ret;
9748 
9749 		ret = intel_atomic_add_affected_planes(state, linked_crtc);
9750 		if (ret)
9751 			return ret;
9752 	}
9753 
9754 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9755 		/* Kill old bigjoiner link, we may re-establish afterwards */
9756 		if (intel_crtc_needs_modeset(crtc_state) &&
9757 		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
9758 			kill_bigjoiner_slave(state, crtc_state);
9759 	}
9760 
9761 	return 0;
9762 }
9763 
9764 /**
9765  * intel_atomic_check - validate state object
9766  * @dev: drm device
9767  * @_state: state to validate
9768  */
9769 static int intel_atomic_check(struct drm_device *dev,
9770 			      struct drm_atomic_state *_state)
9771 {
9772 	struct drm_i915_private *dev_priv = to_i915(dev);
9773 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
9774 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9775 	struct intel_crtc *crtc;
9776 	int ret, i;
9777 	bool any_ms = false;
9778 
9779 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9780 					    new_crtc_state, i) {
9781 		if (new_crtc_state->inherited != old_crtc_state->inherited)
9782 			new_crtc_state->uapi.mode_changed = true;
9783 	}
9784 
9785 	intel_vrr_check_modeset(state);
9786 
9787 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
9788 	if (ret)
9789 		goto fail;
9790 
9791 	ret = intel_bigjoiner_add_affected_crtcs(state);
9792 	if (ret)
9793 		goto fail;
9794 
9795 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9796 					    new_crtc_state, i) {
9797 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
9798 			/* Light copy */
9799 			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
9800 
9801 			continue;
9802 		}
9803 
9804 		if (!new_crtc_state->uapi.enable) {
9805 			if (!new_crtc_state->bigjoiner_slave) {
9806 				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
9807 				any_ms = true;
9808 			}
9809 			continue;
9810 		}
9811 
9812 		ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
9813 		if (ret)
9814 			goto fail;
9815 
9816 		ret = intel_modeset_pipe_config(state, new_crtc_state);
9817 		if (ret)
9818 			goto fail;
9819 
9820 		ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
9821 						   new_crtc_state);
9822 		if (ret)
9823 			goto fail;
9824 	}
9825 
9826 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9827 					    new_crtc_state, i) {
9828 		if (!intel_crtc_needs_modeset(new_crtc_state))
9829 			continue;
9830 
9831 		ret = intel_modeset_pipe_config_late(new_crtc_state);
9832 		if (ret)
9833 			goto fail;
9834 
9835 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
9836 	}
9837 
	/*
	 * Check if fastset is allowed by external dependencies like other
	 * pipes and transcoders.
	 *
	 * Right now it only forces a full modeset when the MST master
	 * transcoder did not change but the pipe of the master transcoder
	 * needs a full modeset, in which case all slaves also need to do a
	 * full modeset. Likewise, in case of port synced crtcs, if one of
	 * the synced crtcs needs a full modeset, all the other synced crtcs
	 * should be forced to do a full modeset as well.
	 */
9849 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9850 		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
9851 			continue;
9852 
9853 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
9854 			enum transcoder master = new_crtc_state->mst_master_transcoder;
9855 
9856 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
9857 				new_crtc_state->uapi.mode_changed = true;
9858 				new_crtc_state->update_pipe = false;
9859 			}
9860 		}
9861 
9862 		if (is_trans_port_sync_mode(new_crtc_state)) {
9863 			u8 trans = new_crtc_state->sync_mode_slaves_mask;
9864 
9865 			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
9866 				trans |= BIT(new_crtc_state->master_transcoder);
9867 
9868 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
9869 				new_crtc_state->uapi.mode_changed = true;
9870 				new_crtc_state->update_pipe = false;
9871 			}
9872 		}
9873 
9874 		if (new_crtc_state->bigjoiner) {
9875 			struct intel_crtc_state *linked_crtc_state =
9876 				intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
9877 
9878 			if (intel_crtc_needs_modeset(linked_crtc_state)) {
9879 				new_crtc_state->uapi.mode_changed = true;
9880 				new_crtc_state->update_pipe = false;
9881 			}
9882 		}
9883 	}
9884 
9885 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9886 					    new_crtc_state, i) {
9887 		if (intel_crtc_needs_modeset(new_crtc_state)) {
9888 			any_ms = true;
9889 			continue;
9890 		}
9891 
9892 		if (!new_crtc_state->update_pipe)
9893 			continue;
9894 
9895 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
9896 	}
9897 
9898 	if (any_ms && !check_digital_port_conflicts(state)) {
9899 		drm_dbg_kms(&dev_priv->drm,
9900 			    "rejecting conflicting digital port configuration\n");
9901 		ret = -EINVAL;
9902 		goto fail;
9903 	}
9904 
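	/*
	 * Have the DP MST helpers validate the payload/bandwidth
	 * limits for the whole topology in this state.
	 */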
9905 	ret = drm_dp_mst_atomic_check(&state->base);
9906 	if (ret)
9907 		goto fail;
9908 
9909 	ret = intel_atomic_check_planes(state);
9910 	if (ret)
9911 		goto fail;
9912 
9913 	intel_fbc_choose_crtc(dev_priv, state);
9914 	ret = calc_watermark_data(state);
9915 	if (ret)
9916 		goto fail;
9917 
9918 	ret = intel_bw_atomic_check(state);
9919 	if (ret)
9920 		goto fail;
9921 
9922 	ret = intel_atomic_check_cdclk(state, &any_ms);
9923 	if (ret)
9924 		goto fail;
9925 
9926 	if (any_ms) {
9927 		ret = intel_modeset_checks(state);
9928 		if (ret)
9929 			goto fail;
9930 
9931 		ret = intel_modeset_calc_cdclk(state);
9932 		if (ret)
9933 			return ret;
9934 
9935 		intel_modeset_clear_plls(state);
9936 	}
9937 
9938 	ret = intel_atomic_check_crtcs(state);
9939 	if (ret)
9940 		goto fail;
9941 
9942 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9943 					    new_crtc_state, i) {
9944 		if (new_crtc_state->uapi.async_flip) {
9945 			ret = intel_atomic_check_async(state);
9946 			if (ret)
9947 				goto fail;
9948 		}
9949 
9950 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
9951 		    !new_crtc_state->update_pipe)
9952 			continue;
9953 
9954 		intel_dump_pipe_config(new_crtc_state, state,
9955 				       intel_crtc_needs_modeset(new_crtc_state) ?
9956 				       "[modeset]" : "[fastset]");
9957 	}
9958 
9959 	return 0;
9960 
9961  fail:
9962 	if (ret == -EDEADLK)
9963 		return ret;
9964 
9965 	/*
9966 	 * FIXME would probably be nice to know which crtc specifically
9967 	 * caused the failure, in cases where we can pinpoint it.
9968 	 */
9969 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9970 					    new_crtc_state, i)
9971 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
9972 
9973 	return ret;
9974 }
9975 
9976 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
9977 {
9978 	struct intel_crtc_state *crtc_state;
9979 	struct intel_crtc *crtc;
9980 	int i, ret;
9981 
9982 	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
9983 	if (ret < 0)
9984 		return ret;
9985 
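	/*
	 * Pre-allocate the DSB (Display State Buffer) used to batch-program
	 * the color management registers for any CRTC that will update them.
	 */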
9986 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9987 		bool mode_changed = intel_crtc_needs_modeset(crtc_state);
9988 
9989 		if (mode_changed || crtc_state->update_pipe ||
9990 		    crtc_state->uapi.color_mgmt_changed) {
9991 			intel_dsb_prepare(crtc_state);
9992 		}
9993 	}
9994 
9995 	return 0;
9996 }
9997 
9998 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
9999 				  struct intel_crtc_state *crtc_state)
10000 {
10001 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10002 
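	/*
	 * Gen2 flags an underrun whenever all planes are disabled, so only
	 * arm the CPU FIFO underrun interrupt there once a plane is active;
	 * everywhere else it can be armed unconditionally.
	 */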
10003 	if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
10004 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10005 
10006 	if (crtc_state->has_pch_encoder) {
10007 		enum pipe pch_transcoder =
10008 			intel_crtc_pch_transcoder(crtc);
10009 
10010 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
10011 	}
10012 }
10013 
10014 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
10015 			       const struct intel_crtc_state *new_crtc_state)
10016 {
10017 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
10018 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10019 
10020 	/*
10021 	 * Update pipe size and adjust fitter if needed: the reason for this is
10022 	 * that in compute_mode_changes we check the native mode (not the pfit
10023 	 * mode) to see if we can flip rather than do a full mode set. In the
10024 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
10025 	 * pfit state, we'll end up with a big fb scanned out into the wrong
10026 	 * sized surface.
10027 	 */
10028 	intel_set_pipe_src_size(new_crtc_state);
10029 
10030 	/* on skylake this is done by detaching scalers */
10031 	if (DISPLAY_VER(dev_priv) >= 9) {
10032 		if (new_crtc_state->pch_pfit.enabled)
10033 			skl_pfit_enable(new_crtc_state);
10034 	} else if (HAS_PCH_SPLIT(dev_priv)) {
10035 		if (new_crtc_state->pch_pfit.enabled)
10036 			ilk_pfit_enable(new_crtc_state);
10037 		else if (old_crtc_state->pch_pfit.enabled)
10038 			ilk_pfit_disable(old_crtc_state);
10039 	}
10040 
	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjusted pixel rate, so pfit changes do
	 * affect it and thus it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot; after
	 * that the value should not change without a full modeset.
	 */
10049 	if (DISPLAY_VER(dev_priv) >= 9 ||
10050 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
10051 		hsw_set_linetime_wm(new_crtc_state);
10052 
10053 	if (DISPLAY_VER(dev_priv) >= 11)
10054 		icl_set_pipe_chicken(crtc);
10055 }
10056 
10057 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
10058 				   struct intel_crtc *crtc)
10059 {
10060 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10061 	const struct intel_crtc_state *old_crtc_state =
10062 		intel_atomic_get_old_crtc_state(state, crtc);
10063 	const struct intel_crtc_state *new_crtc_state =
10064 		intel_atomic_get_new_crtc_state(state, crtc);
10065 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10066 
10067 	/*
10068 	 * During modesets pipe configuration was programmed as the
10069 	 * CRTC was enabled.
10070 	 */
10071 	if (!modeset) {
10072 		if (new_crtc_state->uapi.color_mgmt_changed ||
10073 		    new_crtc_state->update_pipe)
10074 			intel_color_commit(new_crtc_state);
10075 
10076 		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
10077 			bdw_set_pipemisc(new_crtc_state);
10078 
10079 		if (new_crtc_state->update_pipe)
10080 			intel_pipe_fastset(old_crtc_state, new_crtc_state);
10081 
10082 		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
10083 	}
10084 
10085 	if (dev_priv->display.atomic_update_watermarks)
10086 		dev_priv->display.atomic_update_watermarks(state, crtc);
10087 }
10088 
10089 static void commit_pipe_post_planes(struct intel_atomic_state *state,
10090 				    struct intel_crtc *crtc)
10091 {
10092 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10093 	const struct intel_crtc_state *new_crtc_state =
10094 		intel_atomic_get_new_crtc_state(state, crtc);
10095 
10096 	/*
10097 	 * Disable the scaler(s) after the plane(s) so that we don't
10098 	 * get a catastrophic underrun even if the two operations
10099 	 * end up happening in two different frames.
10100 	 */
10101 	if (DISPLAY_VER(dev_priv) >= 9 &&
10102 	    !intel_crtc_needs_modeset(new_crtc_state))
10103 		skl_detach_scalers(new_crtc_state);
10104 }
10105 
10106 static void intel_enable_crtc(struct intel_atomic_state *state,
10107 			      struct intel_crtc *crtc)
10108 {
10109 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10110 	const struct intel_crtc_state *new_crtc_state =
10111 		intel_atomic_get_new_crtc_state(state, crtc);
10112 
10113 	if (!intel_crtc_needs_modeset(new_crtc_state))
10114 		return;
10115 
10116 	intel_crtc_update_active_timings(new_crtc_state);
10117 
10118 	dev_priv->display.crtc_enable(state, crtc);
10119 
10120 	if (new_crtc_state->bigjoiner_slave)
10121 		return;
10122 
10123 	/* vblanks work again, re-enable pipe CRC. */
10124 	intel_crtc_enable_pipe_crc(crtc);
10125 }
10126 
10127 static void intel_update_crtc(struct intel_atomic_state *state,
10128 			      struct intel_crtc *crtc)
10129 {
10130 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10131 	const struct intel_crtc_state *old_crtc_state =
10132 		intel_atomic_get_old_crtc_state(state, crtc);
10133 	struct intel_crtc_state *new_crtc_state =
10134 		intel_atomic_get_new_crtc_state(state, crtc);
10135 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10136 
10137 	if (!modeset) {
10138 		if (new_crtc_state->preload_luts &&
10139 		    (new_crtc_state->uapi.color_mgmt_changed ||
10140 		     new_crtc_state->update_pipe))
10141 			intel_color_load_luts(new_crtc_state);
10142 
10143 		intel_pre_plane_update(state, crtc);
10144 
10145 		if (new_crtc_state->update_pipe)
10146 			intel_encoders_update_pipe(state, crtc);
10147 	}
10148 
10149 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
10150 		intel_fbc_disable(crtc);
10151 	else
10152 		intel_fbc_enable(state, crtc);
10153 
10154 	/* Perform vblank evasion around commit operation */
10155 	intel_pipe_update_start(new_crtc_state);
10156 
10157 	commit_pipe_pre_planes(state, crtc);
10158 
10159 	if (DISPLAY_VER(dev_priv) >= 9)
10160 		skl_update_planes_on_crtc(state, crtc);
10161 	else
10162 		i9xx_update_planes_on_crtc(state, crtc);
10163 
10164 	commit_pipe_post_planes(state, crtc);
10165 
10166 	intel_pipe_update_end(new_crtc_state);
10167 
10168 	/*
10169 	 * We usually enable FIFO underrun interrupts as part of the
10170 	 * CRTC enable sequence during modesets.  But when we inherit a
10171 	 * valid pipe configuration from the BIOS we need to take care
10172 	 * of enabling them on the CRTC's first fastset.
10173 	 */
10174 	if (new_crtc_state->update_pipe && !modeset &&
10175 	    old_crtc_state->inherited)
10176 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
10177 }
10178 
10179 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
10180 					  struct intel_crtc_state *old_crtc_state,
10181 					  struct intel_crtc_state *new_crtc_state,
10182 					  struct intel_crtc *crtc)
10183 {
10184 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10185 
10186 	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
10187 
10188 	intel_crtc_disable_planes(state, crtc);
10189 
	/*
	 * We still need special handling to disable bigjoiner master
	 * and slaves: a slave has no encoder or PLLs of its own, so
	 * there is nothing of that kind to disable for it.
	 */
10195 	if (old_crtc_state->bigjoiner) {
10196 		intel_crtc_disable_planes(state,
10197 					  old_crtc_state->bigjoiner_linked_crtc);
10198 		old_crtc_state->bigjoiner_linked_crtc->active = false;
10199 	}
10200 
10201 	/*
10202 	 * We need to disable pipe CRC before disabling the pipe,
10203 	 * or we race against vblank off.
10204 	 */
10205 	intel_crtc_disable_pipe_crc(crtc);
10206 
10207 	dev_priv->display.crtc_disable(state, crtc);
10208 	crtc->active = false;
10209 	intel_fbc_disable(crtc);
10210 	intel_disable_shared_dpll(old_crtc_state);
10211 
10212 	/* FIXME unify this for all platforms */
10213 	if (!new_crtc_state->hw.active &&
10214 	    !HAS_GMCH(dev_priv) &&
10215 	    dev_priv->display.initial_watermarks)
10216 		dev_priv->display.initial_watermarks(state, crtc);
10217 }
10218 
10219 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
10220 {
10221 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10222 	struct intel_crtc *crtc;
10223 	u32 handled = 0;
10224 	int i;
10225 
10226 	/* Only disable port sync and MST slaves */
10227 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10228 					    new_crtc_state, i) {
10229 		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
10230 			continue;
10231 
10232 		if (!old_crtc_state->hw.active)
10233 			continue;
10234 
		/* In the case of Transcoder Port Sync, master and slave CRTCs
		 * can be assigned in any order, so we need to make sure that
		 * the slave CRTCs are disabled first and the master CRTC
		 * last, since slave vblanks are masked until the master's
		 * vblank.
		 */
10240 		if (!is_trans_port_sync_slave(old_crtc_state) &&
10241 		    !intel_dp_mst_is_slave_trans(old_crtc_state))
10242 			continue;
10243 
10244 		intel_pre_plane_update(state, crtc);
10245 		intel_old_crtc_state_disables(state, old_crtc_state,
10246 					      new_crtc_state, crtc);
10247 		handled |= BIT(crtc->pipe);
10248 	}
10249 
10250 	/* Disable everything else left on */
10251 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10252 					    new_crtc_state, i) {
10253 		if (!intel_crtc_needs_modeset(new_crtc_state) ||
10254 		    (handled & BIT(crtc->pipe)) ||
10255 		    old_crtc_state->bigjoiner_slave)
10256 			continue;
10257 
10258 		intel_pre_plane_update(state, crtc);
10259 		if (old_crtc_state->bigjoiner) {
10260 			struct intel_crtc *slave =
10261 				old_crtc_state->bigjoiner_linked_crtc;
10262 
10263 			intel_pre_plane_update(state, slave);
10264 		}
10265 
10266 		if (old_crtc_state->hw.active)
10267 			intel_old_crtc_state_disables(state, old_crtc_state,
10268 						      new_crtc_state, crtc);
10269 	}
10270 }
10271 
10272 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
10273 {
10274 	struct intel_crtc_state *new_crtc_state;
10275 	struct intel_crtc *crtc;
10276 	int i;
10277 
10278 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10279 		if (!new_crtc_state->hw.active)
10280 			continue;
10281 
10282 		intel_enable_crtc(state, crtc);
10283 		intel_update_crtc(state, crtc);
10284 	}
10285 }
10286 
10287 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
10288 {
10289 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10290 	struct intel_crtc *crtc;
10291 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10292 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
10293 	u8 update_pipes = 0, modeset_pipes = 0;
10294 	int i;
10295 
10296 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10297 		enum pipe pipe = crtc->pipe;
10298 
10299 		if (!new_crtc_state->hw.active)
10300 			continue;
10301 
		/* Ignore allocations for CRTCs that have been turned off. */
10303 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
10304 			entries[pipe] = old_crtc_state->wm.skl.ddb;
10305 			update_pipes |= BIT(pipe);
10306 		} else {
10307 			modeset_pipes |= BIT(pipe);
10308 		}
10309 	}
10310 
	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first let's enable all the pipes that do not need a full modeset,
	 * as those have no external dependencies.
	 */
10320 	while (update_pipes) {
10321 		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10322 						    new_crtc_state, i) {
10323 			enum pipe pipe = crtc->pipe;
10324 
10325 			if ((update_pipes & BIT(pipe)) == 0)
10326 				continue;
10327 
10328 			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10329 							entries, I915_MAX_PIPES, pipe))
10330 				continue;
10331 
10332 			entries[pipe] = new_crtc_state->wm.skl.ddb;
10333 			update_pipes &= ~BIT(pipe);
10334 
10335 			intel_update_crtc(state, crtc);
10336 
			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating,
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
10343 			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
10344 						 &old_crtc_state->wm.skl.ddb) &&
10345 			    (update_pipes | modeset_pipes))
10346 				intel_wait_for_vblank(dev_priv, pipe);
10347 		}
10348 	}
10349 
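	/*
	 * All fastset pipes are now done; the modeset pipes still need their
	 * plane updates, which the final loop below performs via update_pipes.
	 */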
10350 	update_pipes = modeset_pipes;
10351 
	/*
	 * Enable all the pipes that need a modeset and do not depend on
	 * other pipes.
	 */
10356 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10357 		enum pipe pipe = crtc->pipe;
10358 
10359 		if ((modeset_pipes & BIT(pipe)) == 0)
10360 			continue;
10361 
10362 		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
10363 		    is_trans_port_sync_master(new_crtc_state) ||
10364 		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
10365 			continue;
10366 
10367 		modeset_pipes &= ~BIT(pipe);
10368 
10369 		intel_enable_crtc(state, crtc);
10370 	}
10371 
	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves, port sync masters and bigjoiner masters.
	 */
10376 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10377 		enum pipe pipe = crtc->pipe;
10378 
10379 		if ((modeset_pipes & BIT(pipe)) == 0)
10380 			continue;
10381 
10382 		modeset_pipes &= ~BIT(pipe);
10383 
10384 		intel_enable_crtc(state, crtc);
10385 	}
10386 
10387 	/*
10388 	 * Finally we do the plane updates/etc. for all pipes that got enabled.
10389 	 */
10390 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10391 		enum pipe pipe = crtc->pipe;
10392 
10393 		if ((update_pipes & BIT(pipe)) == 0)
10394 			continue;
10395 
10396 		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10397 									entries, I915_MAX_PIPES, pipe));
10398 
10399 		entries[pipe] = new_crtc_state->wm.skl.ddb;
10400 		update_pipes &= ~BIT(pipe);
10401 
10402 		intel_update_crtc(state, crtc);
10403 	}
10404 
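	/* All modeset/fastset pipes must have been handled by now. */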
10405 	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
10406 	drm_WARN_ON(&dev_priv->drm, update_pipes);
10407 }
10408 
10409 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10410 {
10411 	struct intel_atomic_state *state, *next;
10412 	struct llist_node *freed;
10413 
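	/*
	 * Drop the final state references here: the fence FREE callback only
	 * queues them, as it may run in a context where dropping the
	 * reference directly isn't safe.
	 */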
10414 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10415 	llist_for_each_entry_safe(state, next, freed, freed)
10416 		drm_atomic_state_put(&state->base);
10417 }
10418 
10419 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10420 {
10421 	struct drm_i915_private *dev_priv =
10422 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10423 
10424 	intel_atomic_helper_free_state(dev_priv);
10425 }
10426 
10427 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
10428 {
10429 	struct wait_queue_entry wait_fence, wait_reset;
10430 	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
10431 
10432 	init_wait_entry(&wait_fence, 0);
10433 	init_wait_entry(&wait_reset, 0);
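	/*
	 * Wait for either the commit fence to signal or for a GPU reset
	 * that needs to take over the modeset state, whichever comes first.
	 */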
10434 	for (;;) {
10435 		prepare_to_wait(&intel_state->commit_ready.wait,
10436 				&wait_fence, TASK_UNINTERRUPTIBLE);
10437 		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10438 					      I915_RESET_MODESET),
10439 				&wait_reset, TASK_UNINTERRUPTIBLE);
10440 
10442 		if (i915_sw_fence_done(&intel_state->commit_ready) ||
10443 		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
10444 			break;
10445 
10446 		schedule();
10447 	}
10448 	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
10449 	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10450 				  I915_RESET_MODESET),
10451 		    &wait_reset);
10452 }
10453 
10454 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
10455 {
10456 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10457 	struct intel_crtc *crtc;
10458 	int i;
10459 
10460 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10461 					    new_crtc_state, i)
10462 		intel_dsb_cleanup(old_crtc_state);
10463 }
10464 
10465 static void intel_atomic_cleanup_work(struct work_struct *work)
10466 {
10467 	struct intel_atomic_state *state =
10468 		container_of(work, struct intel_atomic_state, base.commit_work);
10469 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10470 
10471 	intel_cleanup_dsbs(state);
10472 	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
10473 	drm_atomic_helper_commit_cleanup_done(&state->base);
10474 	drm_atomic_state_put(&state->base);
10475 
10476 	intel_atomic_helper_free_state(i915);
10477 }
10478 
10479 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
10480 {
10481 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10482 	struct intel_plane *plane;
10483 	struct intel_plane_state *plane_state;
10484 	int i;
10485 
10486 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10487 		struct drm_framebuffer *fb = plane_state->hw.fb;
10488 		int ret;
10489 
10490 		if (!fb ||
10491 		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
10492 			continue;
10493 
		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requires this value to be located in the fb at
		 * offset 0 of plane #2):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that the FB obj
		 * is pinned and the caller made sure that the object is synced
		 * wrt. the related color clear value GPU write on it.
		 */
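		/*
		 * The 8-byte native value follows the 4 x 4-byte per-channel
		 * values, hence the +16 byte offset into plane #2.
		 */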
10507 		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
10508 						     fb->offsets[2] + 16,
10509 						     &plane_state->ccval,
10510 						     sizeof(plane_state->ccval));
10511 		/* The above could only fail if the FB obj has an unexpected backing store type. */
10512 		drm_WARN_ON(&i915->drm, ret);
10513 	}
10514 }
10515 
10516 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10517 {
10518 	struct drm_device *dev = state->base.dev;
10519 	struct drm_i915_private *dev_priv = to_i915(dev);
10520 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10521 	struct intel_crtc *crtc;
10522 	u64 put_domains[I915_MAX_PIPES] = {};
10523 	intel_wakeref_t wakeref = 0;
10524 	int i;
10525 
10526 	intel_atomic_commit_fence_wait(state);
10527 
10528 	drm_atomic_helper_wait_for_dependencies(&state->base);
10529 
10530 	if (state->modeset)
10531 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10532 
10533 	intel_atomic_prepare_plane_clear_colors(state);
10534 
10535 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10536 					    new_crtc_state, i) {
10537 		if (intel_crtc_needs_modeset(new_crtc_state) ||
10538 		    new_crtc_state->update_pipe) {
10540 			put_domains[crtc->pipe] =
10541 				modeset_get_crtc_power_domains(new_crtc_state);
10542 		}
10543 	}
10544 
10545 	intel_commit_modeset_disables(state);
10546 
10547 	/* FIXME: Eventually get rid of our crtc->config pointer */
10548 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10549 		crtc->config = new_crtc_state;
10550 
10551 	if (state->modeset) {
10552 		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10553 
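		/*
		 * Apply any cdclk change that must be done before the planes
		 * are updated; the rest happens post plane update.
		 */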
10554 		intel_set_cdclk_pre_plane_update(state);
10555 
10556 		intel_modeset_verify_disabled(dev_priv, state);
10557 	}
10558 
10559 	intel_sagv_pre_plane_update(state);
10560 
10561 	/* Complete the events for pipes that have now been disabled */
10562 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10563 		bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10564 
10566 		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
10567 			spin_lock_irq(&dev->event_lock);
10568 			drm_crtc_send_vblank_event(&crtc->base,
10569 						   new_crtc_state->uapi.event);
10570 			spin_unlock_irq(&dev->event_lock);
10571 
10572 			new_crtc_state->uapi.event = NULL;
10573 		}
10574 	}
10575 
10576 	if (state->modeset)
10577 		intel_encoders_update_prepare(state);
10578 
10579 	intel_dbuf_pre_plane_update(state);
10580 
10581 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10582 		if (new_crtc_state->uapi.async_flip)
10583 			intel_crtc_enable_flip_done(state, crtc);
10584 	}
10585 
10586 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
10587 	dev_priv->display.commit_modeset_enables(state);
10588 
10589 	if (state->modeset) {
10590 		intel_encoders_update_complete(state);
10591 
10592 		intel_set_cdclk_post_plane_update(state);
10593 	}
10594 
	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously_
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
10604 	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
10605 
10606 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10607 		if (new_crtc_state->uapi.async_flip)
10608 			intel_crtc_disable_flip_done(state, crtc);
10609 
10610 		if (new_crtc_state->hw.active &&
10611 		    !intel_crtc_needs_modeset(new_crtc_state) &&
10612 		    !new_crtc_state->preload_luts &&
10613 		    (new_crtc_state->uapi.color_mgmt_changed ||
10614 		     new_crtc_state->update_pipe))
10615 			intel_color_load_luts(new_crtc_state);
10616 	}
10617 
10618 	/*
10619 	 * Now that the vblank has passed, we can go ahead and program the
10620 	 * optimal watermarks on platforms that need two-step watermark
10621 	 * programming.
10622 	 *
10623 	 * TODO: Move this (and other cleanup) to an async worker eventually.
10624 	 */
10625 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10626 					    new_crtc_state, i) {
10627 		/*
10628 		 * Gen2 reports pipe underruns whenever all planes are disabled.
10629 		 * So re-enable underrun reporting after some planes get enabled.
10630 		 *
10631 		 * We do this before .optimize_watermarks() so that we have a
10632 		 * chance of catching underruns with the intermediate watermarks
10633 		 * vs. the new plane configuration.
10634 		 */
10635 		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
10636 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10637 
10638 		if (dev_priv->display.optimize_watermarks)
10639 			dev_priv->display.optimize_watermarks(state, crtc);
10640 	}
10641 
10642 	intel_dbuf_post_plane_update(state);
10643 
10644 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10645 		intel_post_plane_update(state, crtc);
10646 
10647 		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
10648 
10649 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
10650 
		/*
		 * DSB cleanup is done in cleanup_work, aligning with the
		 * framebuffer cleanup. So move the dsb structure over to the
		 * old state, where cleanup_work will find and free it once
		 * the commit is done.
		 */
10656 		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
10657 	}
10658 
10659 	/* Underruns don't always raise interrupts, so check manually */
10660 	intel_check_cpu_fifo_underruns(dev_priv);
10661 	intel_check_pch_fifo_underruns(dev_priv);
10662 
10663 	if (state->modeset)
10664 		intel_verify_planes(state);
10665 
10666 	intel_sagv_post_plane_update(state);
10667 
10668 	drm_atomic_helper_commit_hw_done(&state->base);
10669 
10670 	if (state->modeset) {
10671 		/* As one of the primary mmio accessors, KMS has a high
10672 		 * likelihood of triggering bugs in unclaimed access. After we
10673 		 * finish modesetting, see if an error has been flagged, and if
10674 		 * so enable debugging for the next modeset - and hope we catch
10675 		 * the culprit.
10676 		 */
10677 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
10678 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
10679 	}
10680 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10681 
	/*
	 * Defer the cleanup of the old state to a separate worker so as not
	 * to impede the current task (userspace for blocking modesets) that
	 * is executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems like overkill, but we would place
	 * a schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
10690 	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
10691 	queue_work(system_highpri_wq, &state->base.commit_work);
10692 }
10693 
10694 static void intel_atomic_commit_work(struct work_struct *work)
10695 {
10696 	struct intel_atomic_state *state =
10697 		container_of(work, struct intel_atomic_state, base.commit_work);
10698 
10699 	intel_atomic_commit_tail(state);
10700 }
10701 
10702 static int __i915_sw_fence_call
10703 intel_atomic_commit_ready(struct i915_sw_fence *fence,
10704 			  enum i915_sw_fence_notify notify)
10705 {
10706 	struct intel_atomic_state *state =
10707 		container_of(fence, struct intel_atomic_state, commit_ready);
10708 
10709 	switch (notify) {
10710 	case FENCE_COMPLETE:
10711 		/* we do blocking waits in the worker, nothing to do here */
10712 		break;
10713 	case FENCE_FREE:
10714 		{
10715 			struct intel_atomic_helper *helper =
10716 				&to_i915(state->base.dev)->atomic_helper;
10717 
10718 			if (llist_add(&state->freed, &helper->free_list))
10719 				schedule_work(&helper->free_work);
10720 			break;
10721 		}
10722 	}
10723 
10724 	return NOTIFY_DONE;
10725 }
10726 
10727 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
10728 {
10729 	struct intel_plane_state *old_plane_state, *new_plane_state;
10730 	struct intel_plane *plane;
10731 	int i;
10732 
10733 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
10734 					     new_plane_state, i)
10735 		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
10736 					to_intel_frontbuffer(new_plane_state->hw.fb),
10737 					plane->frontbuffer_bit);
10738 }
10739 
10740 static int intel_atomic_commit(struct drm_device *dev,
10741 			       struct drm_atomic_state *_state,
10742 			       bool nonblock)
10743 {
10744 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
10745 	struct drm_i915_private *dev_priv = to_i915(dev);
10746 	int ret = 0;
10747 
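	/*
	 * Hold a runtime PM reference for the whole commit; it is released
	 * at the end of the commit tail, or on the error paths below.
	 */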
10748 	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
10749 
10750 	drm_atomic_state_get(&state->base);
10751 	i915_sw_fence_init(&state->commit_ready,
10752 			   intel_atomic_commit_ready);
10753 
10754 	/*
10755 	 * The intel_legacy_cursor_update() fast path takes care
10756 	 * of avoiding the vblank waits for simple cursor
10757 	 * movement and flips. For cursor on/off and size changes,
10758 	 * we want to perform the vblank waits so that watermark
10759 	 * updates happen during the correct frames. Gen9+ have
10760 	 * double buffered watermarks and so shouldn't need this.
10761 	 *
10762 	 * Unset state->legacy_cursor_update before the call to
10763 	 * drm_atomic_helper_setup_commit() because otherwise
10764 	 * drm_atomic_helper_wait_for_flip_done() is a noop and
10765 	 * we get FIFO underruns because we didn't wait
10766 	 * for vblank.
10767 	 *
10768 	 * FIXME doing watermarks and fb cleanup from a vblank worker
10769 	 * (assuming we had any) would solve these problems.
10770 	 */
10771 	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
10772 		struct intel_crtc_state *new_crtc_state;
10773 		struct intel_crtc *crtc;
10774 		int i;
10775 
10776 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10777 			if (new_crtc_state->wm.need_postvbl_update ||
10778 			    new_crtc_state->update_wm_post)
10779 				state->base.legacy_cursor_update = false;
10780 	}
10781 
10782 	ret = intel_atomic_prepare_commit(state);
10783 	if (ret) {
10784 		drm_dbg_atomic(&dev_priv->drm,
10785 			       "Preparing state failed with %i\n", ret);
10786 		i915_sw_fence_commit(&state->commit_ready);
10787 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10788 		return ret;
10789 	}
10790 
10791 	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
10792 	if (!ret)
10793 		ret = drm_atomic_helper_swap_state(&state->base, true);
10794 	if (!ret)
10795 		intel_atomic_swap_global_state(state);
10796 
10797 	if (ret) {
10798 		struct intel_crtc_state *new_crtc_state;
10799 		struct intel_crtc *crtc;
10800 		int i;
10801 
10802 		i915_sw_fence_commit(&state->commit_ready);
10803 
10804 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10805 			intel_dsb_cleanup(new_crtc_state);
10806 
10807 		drm_atomic_helper_cleanup_planes(dev, &state->base);
10808 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10809 		return ret;
10810 	}
10811 	intel_shared_dpll_swap_state(state);
10812 	intel_atomic_track_fbs(state);
10813 
10814 	drm_atomic_state_get(&state->base);
10815 	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
10816 
10817 	i915_sw_fence_commit(&state->commit_ready);
10818 	if (nonblock && state->modeset) {
10819 		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
10820 	} else if (nonblock) {
10821 		queue_work(dev_priv->flip_wq, &state->base.commit_work);
10822 	} else {
10823 		if (state->modeset)
10824 			flush_workqueue(dev_priv->modeset_wq);
10825 		intel_atomic_commit_tail(state);
10826 	}
10827 
10828 	return 0;
10829 }
10830 
10831 struct wait_rps_boost {
10832 	struct wait_queue_entry wait;
10833 
10834 	struct drm_crtc *crtc;
10835 	struct i915_request *request;
10836 };
10837 
10838 static int do_rps_boost(struct wait_queue_entry *_wait,
10839 			unsigned mode, int sync, void *key)
10840 {
10841 	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
10842 	struct i915_request *rq = wait->request;
10843 
10844 	/*
10845 	 * If we missed the vblank, but the request is already running it
10846 	 * is reasonable to assume that it will complete before the next
10847 	 * vblank without our intervention, so leave RPS alone.
10848 	 */
10849 	if (!i915_request_started(rq))
10850 		intel_rps_boost(rq);
10851 	i915_request_put(rq);
10852 
10853 	drm_crtc_vblank_put(wait->crtc);
10854 
10855 	list_del(&wait->wait.entry);
10856 	kfree(wait);
10857 	return 1;
10858 }
10859 
10860 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
10861 				       struct dma_fence *fence)
10862 {
10863 	struct wait_rps_boost *wait;
10864 
10865 	if (!dma_fence_is_i915(fence))
10866 		return;
10867 
10868 	if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
10869 		return;
10870 
10871 	if (drm_crtc_vblank_get(crtc))
10872 		return;
10873 
10874 	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
10875 	if (!wait) {
10876 		drm_crtc_vblank_put(crtc);
10877 		return;
10878 	}
10879 
10880 	wait->request = to_request(dma_fence_get(fence));
10881 	wait->crtc = crtc;
10882 
10883 	wait->wait.func = do_rps_boost;
10884 	wait->wait.flags = 0;
10885 
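	/* do_rps_boost() will be called from the CRTC's vblank waitqueue. */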
10886 	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
10887 }
10888 
10889 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
10890 {
10891 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
10892 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10893 	struct drm_framebuffer *fb = plane_state->hw.fb;
10894 	struct i915_vma *vma;
10895 	bool phys_cursor =
10896 		plane->id == PLANE_CURSOR &&
10897 		INTEL_INFO(dev_priv)->display.cursor_needs_physical;
10898 
10899 	if (!intel_fb_uses_dpt(fb)) {
10900 		vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
10901 						 &plane_state->view.gtt,
10902 						 intel_plane_uses_fence(plane_state),
10903 						 &plane_state->flags);
10904 		if (IS_ERR(vma))
10905 			return PTR_ERR(vma);
10906 
10907 		plane_state->ggtt_vma = vma;
10908 	} else {
10909 		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10910 
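		/*
		 * The fb is accessed through a display page table (DPT):
		 * pin the DPT itself into the GGTT first, then map the fb
		 * into the DPT.
		 */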
10911 		vma = intel_dpt_pin(intel_fb->dpt_vm);
10912 		if (IS_ERR(vma))
10913 			return PTR_ERR(vma);
10914 
10915 		plane_state->ggtt_vma = vma;
10916 
10917 		vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
10918 					   &plane_state->flags, intel_fb->dpt_vm);
10919 		if (IS_ERR(vma)) {
10920 			intel_dpt_unpin(intel_fb->dpt_vm);
10921 			plane_state->ggtt_vma = NULL;
10922 			return PTR_ERR(vma);
10923 		}
10924 
10925 		plane_state->dpt_vma = vma;
10926 
10927 		WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
10928 	}
10929 
10930 	return 0;
10931 }
10932 
10933 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
10934 {
10935 	struct drm_framebuffer *fb = old_plane_state->hw.fb;
10936 	struct i915_vma *vma;
10937 
10938 	if (!intel_fb_uses_dpt(fb)) {
10939 		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
10940 		if (vma)
10941 			intel_unpin_fb_vma(vma, old_plane_state->flags);
10942 	} else {
10943 		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10944 
10945 		vma = fetch_and_zero(&old_plane_state->dpt_vma);
10946 		if (vma)
10947 			intel_unpin_fb_vma(vma, old_plane_state->flags);
10948 
10949 		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
10950 		if (vma)
10951 			intel_dpt_unpin(intel_fb->dpt_vm);
10952 	}
10953 }
10954 
10955 /**
10956  * intel_prepare_plane_fb - Prepare fb for usage on plane
10957  * @_plane: drm plane to prepare for
10958  * @_new_plane_state: the plane state being prepared
10959  *
10960  * Prepares a framebuffer for usage on a display plane.  Generally this
10961  * involves pinning the underlying object and updating the frontbuffer tracking
10962  * bits.  Some older platforms need special physical address handling for
10963  * cursor planes.
10964  *
10965  * Returns 0 on success, negative error code on failure.
10966  */
10967 int
10968 intel_prepare_plane_fb(struct drm_plane *_plane,
10969 		       struct drm_plane_state *_new_plane_state)
10970 {
10971 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
10972 	struct intel_plane *plane = to_intel_plane(_plane);
10973 	struct intel_plane_state *new_plane_state =
10974 		to_intel_plane_state(_new_plane_state);
10975 	struct intel_atomic_state *state =
10976 		to_intel_atomic_state(new_plane_state->uapi.state);
10977 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10978 	const struct intel_plane_state *old_plane_state =
10979 		intel_atomic_get_old_plane_state(state, plane);
10980 	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
10981 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
10982 	int ret;
10983 
10984 	if (old_obj) {
10985 		const struct intel_crtc_state *crtc_state =
10986 			intel_atomic_get_new_crtc_state(state,
10987 							to_intel_crtc(old_plane_state->hw.crtc));
10988 
		/* Big Hammer: we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * pointing to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
10999 		 */
11000 		if (intel_crtc_needs_modeset(crtc_state)) {
11001 			ret = i915_sw_fence_await_reservation(&state->commit_ready,
11002 							      old_obj->base.resv, NULL,
11003 							      false, 0,
11004 							      GFP_KERNEL);
11005 			if (ret < 0)
11006 				return ret;
11007 		}
11008 	}
11009 
11010 	if (new_plane_state->uapi.fence) { /* explicit fencing */
11011 		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
11012 					     &attr);
11013 		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
11014 						    new_plane_state->uapi.fence,
11015 						    i915_fence_timeout(dev_priv),
11016 						    GFP_KERNEL);
11017 		if (ret < 0)
11018 			return ret;
11019 	}
11020 
	if (!obj)
		return 0;

11025 	ret = intel_plane_pin_fb(new_plane_state);
11026 	if (ret)
11027 		return ret;
11028 
11029 	i915_gem_object_wait_priority(obj, 0, &attr);
11030 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
11031 
11032 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
11033 		struct dma_fence *fence;
11034 
11035 		ret = i915_sw_fence_await_reservation(&state->commit_ready,
11036 						      obj->base.resv, NULL,
11037 						      false,
11038 						      i915_fence_timeout(dev_priv),
11039 						      GFP_KERNEL);
11040 		if (ret < 0)
11041 			goto unpin_fb;
11042 
11043 		fence = dma_resv_get_excl_rcu(obj->base.resv);
11044 		if (fence) {
11045 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11046 						   fence);
11047 			dma_fence_put(fence);
11048 		}
11049 	} else {
11050 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11051 					   new_plane_state->uapi.fence);
11052 	}
11053 
11054 	/*
11055 	 * We declare pageflips to be interactive and so merit a small bias
11056 	 * towards upclocking to deliver the frame on time. By only changing
11057 	 * the RPS thresholds to sample more regularly and aim for higher
11058 	 * clocks we can hopefully deliver low power workloads (like kodi)
11059 	 * that are not quite steady state without resorting to forcing
11060 	 * maximum clocks following a vblank miss (see do_rps_boost()).
11061 	 */
11062 	if (!state->rps_interactive) {
11063 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
11064 		state->rps_interactive = true;
11065 	}
11066 
11067 	return 0;
11068 
11069 unpin_fb:
11070 	intel_plane_unpin_fb(new_plane_state);
11071 
11072 	return ret;
11073 }
11074 
11075 /**
11076  * intel_cleanup_plane_fb - Cleans up an fb after plane use
11077  * @plane: drm plane to clean up for
11078  * @_old_plane_state: the state from the previous modeset
11079  *
11080  * Cleans up a framebuffer that has just been removed from a plane.
11081  */
11082 void
11083 intel_cleanup_plane_fb(struct drm_plane *plane,
11084 		       struct drm_plane_state *_old_plane_state)
11085 {
11086 	struct intel_plane_state *old_plane_state =
11087 		to_intel_plane_state(_old_plane_state);
11088 	struct intel_atomic_state *state =
11089 		to_intel_atomic_state(old_plane_state->uapi.state);
11090 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
11091 	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
11092 
11093 	if (!obj)
11094 		return;
11095 
11096 	if (state->rps_interactive) {
11097 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
11098 		state->rps_interactive = false;
11099 	}
11100 
11101 	/* Should only be called after a successful intel_prepare_plane_fb()! */
11102 	intel_plane_unpin_fb(old_plane_state);
11103 }
11104 
11105 /**
11106  * intel_plane_destroy - destroy a plane
11107  * @plane: plane to destroy
11108  *
11109  * Common destruction function for all types of planes (primary, cursor,
11110  * sprite).
11111  */
11112 void intel_plane_destroy(struct drm_plane *plane)
11113 {
11114 	drm_plane_cleanup(plane);
11115 	kfree(to_intel_plane(plane));
11116 }
11117 
11118 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
11119 {
11120 	struct intel_plane *plane;
11121 
11122 	for_each_intel_plane(&dev_priv->drm, plane) {
11123 		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
11124 								  plane->pipe);
11125 
11126 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
11127 	}
11128 }
11129 
11131 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
11132 				      struct drm_file *file)
11133 {
11134 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11135 	struct drm_crtc *drmmode_crtc;
11136 	struct intel_crtc *crtc;
11137 
11138 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
11139 	if (!drmmode_crtc)
11140 		return -ENOENT;
11141 
11142 	crtc = to_intel_crtc(drmmode_crtc);
11143 	pipe_from_crtc_id->pipe = crtc->pipe;
11144 
11145 	return 0;
11146 }
11147 
11148 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
11149 {
11150 	struct drm_device *dev = encoder->base.dev;
11151 	struct intel_encoder *source_encoder;
11152 	u32 possible_clones = 0;
11153 
11154 	for_each_intel_encoder(dev, source_encoder) {
11155 		if (encoders_cloneable(encoder, source_encoder))
11156 			possible_clones |= drm_encoder_mask(&source_encoder->base);
11157 	}
11158 
11159 	return possible_clones;
11160 }
11161 
11162 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
11163 {
11164 	struct drm_device *dev = encoder->base.dev;
11165 	struct intel_crtc *crtc;
11166 	u32 possible_crtcs = 0;
11167 
11168 	for_each_intel_crtc(dev, crtc) {
11169 		if (encoder->pipe_mask & BIT(crtc->pipe))
11170 			possible_crtcs |= drm_crtc_mask(&crtc->base);
11171 	}
11172 
11173 	return possible_crtcs;
11174 }
11175 
11176 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
11177 {
11178 	if (!IS_MOBILE(dev_priv))
11179 		return false;
11180 
11181 	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
11182 		return false;
11183 
11184 	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
11185 		return false;
11186 
11187 	return true;
11188 }
11189 
11190 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
11191 {
11192 	if (DISPLAY_VER(dev_priv) >= 9)
11193 		return false;
11194 
11195 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
11196 		return false;
11197 
11198 	if (HAS_PCH_LPT_H(dev_priv) &&
11199 	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
11200 		return false;
11201 
11202 	/* DDI E can't be used if DDI A requires 4 lanes */
11203 	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
11204 		return false;
11205 
11206 	if (!dev_priv->vbt.int_crt_support)
11207 		return false;
11208 
11209 	return true;
11210 }
11211 
11212 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
11213 {
11214 	struct intel_encoder *encoder;
11215 	bool dpd_is_edp = false;
11216 
11217 	intel_pps_unlock_regs_wa(dev_priv);
11218 
11219 	if (!HAS_DISPLAY(dev_priv))
11220 		return;
11221 
11222 	if (IS_ALDERLAKE_S(dev_priv)) {
11223 		intel_ddi_init(dev_priv, PORT_A);
11224 		intel_ddi_init(dev_priv, PORT_TC1);
11225 		intel_ddi_init(dev_priv, PORT_TC2);
11226 		intel_ddi_init(dev_priv, PORT_TC3);
11227 		intel_ddi_init(dev_priv, PORT_TC4);
11228 	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
11229 		intel_ddi_init(dev_priv, PORT_A);
11230 		intel_ddi_init(dev_priv, PORT_B);
11231 		intel_ddi_init(dev_priv, PORT_TC1);
11232 		intel_ddi_init(dev_priv, PORT_TC2);
11233 	} else if (DISPLAY_VER(dev_priv) >= 12) {
11234 		intel_ddi_init(dev_priv, PORT_A);
11235 		intel_ddi_init(dev_priv, PORT_B);
11236 		intel_ddi_init(dev_priv, PORT_TC1);
11237 		intel_ddi_init(dev_priv, PORT_TC2);
11238 		intel_ddi_init(dev_priv, PORT_TC3);
11239 		intel_ddi_init(dev_priv, PORT_TC4);
11240 		intel_ddi_init(dev_priv, PORT_TC5);
11241 		intel_ddi_init(dev_priv, PORT_TC6);
11242 		icl_dsi_init(dev_priv);
11243 	} else if (IS_JSL_EHL(dev_priv)) {
11244 		intel_ddi_init(dev_priv, PORT_A);
11245 		intel_ddi_init(dev_priv, PORT_B);
11246 		intel_ddi_init(dev_priv, PORT_C);
11247 		intel_ddi_init(dev_priv, PORT_D);
11248 		icl_dsi_init(dev_priv);
11249 	} else if (DISPLAY_VER(dev_priv) == 11) {
11250 		intel_ddi_init(dev_priv, PORT_A);
11251 		intel_ddi_init(dev_priv, PORT_B);
11252 		intel_ddi_init(dev_priv, PORT_C);
11253 		intel_ddi_init(dev_priv, PORT_D);
11254 		intel_ddi_init(dev_priv, PORT_E);
11255 		intel_ddi_init(dev_priv, PORT_F);
11256 		icl_dsi_init(dev_priv);
11257 	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
11258 		intel_ddi_init(dev_priv, PORT_A);
11259 		intel_ddi_init(dev_priv, PORT_B);
11260 		intel_ddi_init(dev_priv, PORT_C);
11261 		vlv_dsi_init(dev_priv);
11262 	} else if (DISPLAY_VER(dev_priv) >= 9) {
11263 		intel_ddi_init(dev_priv, PORT_A);
11264 		intel_ddi_init(dev_priv, PORT_B);
11265 		intel_ddi_init(dev_priv, PORT_C);
11266 		intel_ddi_init(dev_priv, PORT_D);
11267 		intel_ddi_init(dev_priv, PORT_E);
11268 		intel_ddi_init(dev_priv, PORT_F);
11269 	} else if (HAS_DDI(dev_priv)) {
11270 		u32 found;
11271 
11272 		if (intel_ddi_crt_present(dev_priv))
11273 			intel_crt_init(dev_priv);
11274 
11275 		/* Haswell uses DDI functions to detect digital outputs. */
11276 		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
11277 		if (found)
11278 			intel_ddi_init(dev_priv, PORT_A);
11279 
11280 		found = intel_de_read(dev_priv, SFUSE_STRAP);
11281 		if (found & SFUSE_STRAP_DDIB_DETECTED)
11282 			intel_ddi_init(dev_priv, PORT_B);
11283 		if (found & SFUSE_STRAP_DDIC_DETECTED)
11284 			intel_ddi_init(dev_priv, PORT_C);
11285 		if (found & SFUSE_STRAP_DDID_DETECTED)
11286 			intel_ddi_init(dev_priv, PORT_D);
11287 		if (found & SFUSE_STRAP_DDIF_DETECTED)
11288 			intel_ddi_init(dev_priv, PORT_F);
11289 	} else if (HAS_PCH_SPLIT(dev_priv)) {
11290 		int found;
11291 
11292 		/*
11293 		 * intel_edp_init_connector() depends on this completing first,
11294 		 * to prevent the registration of both eDP and LVDS and the
11295 		 * incorrect sharing of the PPS.
11296 		 */
11297 		intel_lvds_init(dev_priv);
11298 		intel_crt_init(dev_priv);
11299 
11300 		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
11301 
11302 		if (ilk_has_edp_a(dev_priv))
11303 			g4x_dp_init(dev_priv, DP_A, PORT_A);
11304 
11305 		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplexed with HDMIB */
11307 			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
11308 			if (!found)
11309 				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
11310 			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
11311 				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
11312 		}
11313 
11314 		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
11315 			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
11316 
11317 		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
11318 			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
11319 
11320 		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
11321 			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
11322 
11323 		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
11324 			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
11325 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
11326 		bool has_edp, has_port;
11327 
11328 		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
11329 			intel_crt_init(dev_priv);
11330 
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both the
		 * strap and the VBT for the presence of the port. Additionally
		 * we can't trust the port type the VBT declares, as we've seen
		 * at least HDMI ports that the VBT claims are DP or eDP.
		 */
11346 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
11347 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
11348 		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
11349 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
11350 		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
11351 			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
11352 
11353 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
11354 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
11355 		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
11356 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
11357 		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
11358 			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
11359 
11360 		if (IS_CHERRYVIEW(dev_priv)) {
11361 			/*
11362 			 * eDP not supported on port D,
11363 			 * so no need to worry about it
11364 			 */
11365 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
11366 			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
11367 				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
11368 			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
11369 				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
11370 		}
11371 
11372 		vlv_dsi_init(dev_priv);
11373 	} else if (IS_PINEVIEW(dev_priv)) {
11374 		intel_lvds_init(dev_priv);
11375 		intel_crt_init(dev_priv);
11376 	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
11377 		bool found = false;
11378 
11379 		if (IS_MOBILE(dev_priv))
11380 			intel_lvds_init(dev_priv);
11381 
11382 		intel_crt_init(dev_priv);
11383 
11384 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11385 			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
11386 			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
11387 			if (!found && IS_G4X(dev_priv)) {
11388 				drm_dbg_kms(&dev_priv->drm,
11389 					    "probing HDMI on SDVOB\n");
11390 				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
11391 			}
11392 
11393 			if (!found && IS_G4X(dev_priv))
11394 				g4x_dp_init(dev_priv, DP_B, PORT_B);
11395 		}
11396 
		/* Before G4X, SDVOC doesn't have its own detect register */
11398 
11399 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11400 			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
11401 			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
11402 		}
11403 
11404 		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
11406 			if (IS_G4X(dev_priv)) {
11407 				drm_dbg_kms(&dev_priv->drm,
11408 					    "probing HDMI on SDVOC\n");
11409 				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
11410 			}
11411 			if (IS_G4X(dev_priv))
11412 				g4x_dp_init(dev_priv, DP_C, PORT_C);
11413 		}
11414 
11415 		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
11416 			g4x_dp_init(dev_priv, DP_D, PORT_D);
11417 
11418 		if (SUPPORTS_TV(dev_priv))
11419 			intel_tv_init(dev_priv);
11420 	} else if (DISPLAY_VER(dev_priv) == 2) {
11421 		if (IS_I85X(dev_priv))
11422 			intel_lvds_init(dev_priv);
11423 
11424 		intel_crt_init(dev_priv);
11425 		intel_dvo_init(dev_priv);
11426 	}
11427 
11428 	for_each_intel_encoder(&dev_priv->drm, encoder) {
11429 		encoder->base.possible_crtcs =
11430 			intel_encoder_possible_crtcs(encoder);
11431 		encoder->base.possible_clones =
11432 			intel_encoder_possible_clones(encoder);
11433 	}
11434 
11435 	intel_init_pch_refclk(dev_priv);
11436 
11437 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
11438 }
11439 
11440 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11441 {
11442 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11443 
11444 	drm_framebuffer_cleanup(fb);
11445 
11446 	if (intel_fb_uses_dpt(fb))
11447 		intel_dpt_destroy(intel_fb->dpt_vm);
11448 
11449 	intel_frontbuffer_put(intel_fb->frontbuffer);
11450 
11451 	kfree(intel_fb);
11452 }
11453 
11454 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
11455 						struct drm_file *file,
11456 						unsigned int *handle)
11457 {
11458 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11459 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
11460 
11461 	if (i915_gem_object_is_userptr(obj)) {
11462 		drm_dbg(&i915->drm,
11463 			"attempting to use a userptr for a framebuffer, denied\n");
11464 		return -EINVAL;
11465 	}
11466 
11467 	return drm_gem_handle_create(file, &obj->base, handle);
11468 }
11469 
11470 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
11471 					struct drm_file *file,
11472 					unsigned flags, unsigned color,
11473 					struct drm_clip_rect *clips,
11474 					unsigned num_clips)
11475 {
11476 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11477 
11478 	i915_gem_object_flush_if_display(obj);
11479 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
11480 
11481 	return 0;
11482 }
11483 
11484 static const struct drm_framebuffer_funcs intel_fb_funcs = {
11485 	.destroy = intel_user_framebuffer_destroy,
11486 	.create_handle = intel_user_framebuffer_create_handle,
11487 	.dirty = intel_user_framebuffer_dirty,
11488 };
11489 
11490 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
11491 				  struct drm_i915_gem_object *obj,
11492 				  struct drm_mode_fb_cmd2 *mode_cmd)
11493 {
11494 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
11495 	struct drm_framebuffer *fb = &intel_fb->base;
11496 	u32 max_stride;
11497 	unsigned int tiling, stride;
11498 	int ret = -EINVAL;
11499 	int i;
11500 
11501 	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
11502 	if (!intel_fb->frontbuffer)
11503 		return -ENOMEM;
11504 
11505 	i915_gem_object_lock(obj, NULL);
11506 	tiling = i915_gem_object_get_tiling(obj);
11507 	stride = i915_gem_object_get_stride(obj);
11508 	i915_gem_object_unlock(obj);
11509 
11510 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
11511 		/*
11512 		 * If there's a fence, enforce that
11513 		 * the fb modifier and tiling mode match.
11514 		 */
11515 		if (tiling != I915_TILING_NONE &&
11516 		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11517 			drm_dbg_kms(&dev_priv->drm,
11518 				    "tiling_mode doesn't match fb modifier\n");
11519 			goto err;
11520 		}
11521 	} else {
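		/*
		 * Legacy addfb without DRM_MODE_FB_MODIFIERS: derive the
		 * modifier from the object's tiling mode. Y tiling was
		 * never supported through this path.
		 */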
11522 		if (tiling == I915_TILING_X) {
11523 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
11524 		} else if (tiling == I915_TILING_Y) {
11525 			drm_dbg_kms(&dev_priv->drm,
11526 				    "No Y tiling for legacy addfb\n");
11527 			goto err;
11528 		}
11529 	}
11530 
11531 	if (!drm_any_plane_has_format(&dev_priv->drm,
11532 				      mode_cmd->pixel_format,
11533 				      mode_cmd->modifier[0])) {
11534 		drm_dbg_kms(&dev_priv->drm,
11535 			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
11536 			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);
11537 		goto err;
11538 	}
11539 
11540 	/*
11541 	 * gen2/3 display engine uses the fence if present,
11542 	 * so the tiling mode must match the fb modifier exactly.
11543 	 */
11544 	if (DISPLAY_VER(dev_priv) < 4 &&
11545 	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11546 		drm_dbg_kms(&dev_priv->drm,
11547 			    "tiling_mode must match fb modifier exactly on gen2/3\n");
11548 		goto err;
11549 	}
11550 
11551 	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
11552 					 mode_cmd->modifier[0]);
11553 	if (mode_cmd->pitches[0] > max_stride) {
11554 		drm_dbg_kms(&dev_priv->drm,
11555 			    "%s pitch (%u) must be at most %d\n",
11556 			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
11557 			    "tiled" : "linear",
11558 			    mode_cmd->pitches[0], max_stride);
11559 		goto err;
11560 	}
11561 
11562 	/*
11563 	 * If there's a fence, enforce that
11564 	 * the fb pitch and fence stride match.
11565 	 */
11566 	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
11567 		drm_dbg_kms(&dev_priv->drm,
11568 			    "pitch (%d) must match tiling stride (%d)\n",
11569 			    mode_cmd->pitches[0], stride);
11570 		goto err;
11571 	}
11572 
11573 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
11574 	if (mode_cmd->offsets[0] != 0) {
11575 		drm_dbg_kms(&dev_priv->drm,
11576 			    "plane 0 offset (0x%08x) must be 0\n",
11577 			    mode_cmd->offsets[0]);
11578 		goto err;
11579 	}
11580 
11581 	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
11582 
11583 	for (i = 0; i < fb->format->num_planes; i++) {
11584 		u32 stride_alignment;
11585 
11586 		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
11587 			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
11588 				    i);
11589 			goto err;
11590 		}
11591 
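		/*
		 * The required stride alignment is a power of two, so a
		 * simple mask test suffices.
		 */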
11592 		stride_alignment = intel_fb_stride_alignment(fb, i);
11593 		if (fb->pitches[i] & (stride_alignment - 1)) {
11594 			drm_dbg_kms(&dev_priv->drm,
11595 				    "plane %d pitch (%d) must be at least %u byte aligned\n",
11596 				    i, fb->pitches[i], stride_alignment);
11597 			goto err;
11598 		}
11599 
11600 		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
11601 			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
11602 
11603 			if (fb->pitches[i] != ccs_aux_stride) {
11604 				drm_dbg_kms(&dev_priv->drm,
11605 					    "ccs aux plane %d pitch (%d) must be %d\n",
11606 					    i,
11607 					    fb->pitches[i], ccs_aux_stride);
11608 				goto err;
11609 			}
11610 		}
11611 
11612 		/* TODO: Add POT stride remapping support for CCS formats as well. */
11613 		if (IS_ALDERLAKE_P(dev_priv) &&
11614 		    mode_cmd->modifier[i] != DRM_FORMAT_MOD_LINEAR &&
11615 		    !intel_fb_needs_pot_stride_remap(intel_fb) &&
11616 		    !is_power_of_2(mode_cmd->pitches[i])) {
11617 			drm_dbg_kms(&dev_priv->drm,
11618 				    "plane %d pitch (%d) must be power of two for tiled buffers\n",
11619 				    i, mode_cmd->pitches[i]);
11620 			goto err;
11621 		}
11622 
11623 		fb->obj[i] = &obj->base;
11624 	}
11625 
11626 	ret = intel_fill_fb_info(dev_priv, intel_fb);
11627 	if (ret)
11628 		goto err;
11629 
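	/*
	 * Framebuffers scanned out through a display page table (DPT)
	 * get their own address space here; it is torn down again in
	 * intel_user_framebuffer_destroy().
	 */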
11630 	if (intel_fb_uses_dpt(fb)) {
11631 		struct i915_address_space *vm;
11632 
11633 		vm = intel_dpt_create(intel_fb);
11634 		if (IS_ERR(vm)) {
11635 			ret = PTR_ERR(vm);
11636 			goto err;
11637 		}
11638 
11639 		intel_fb->dpt_vm = vm;
11640 	}
11641 
	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err_free_dpt;
	}

	return 0;

err_free_dpt:
	/* don't leak the DPT VM if framebuffer registration fails */
	if (intel_fb_uses_dpt(fb))
		intel_dpt_destroy(intel_fb->dpt_vm);
err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
11653 }
11654 
11655 static struct drm_framebuffer *
11656 intel_user_framebuffer_create(struct drm_device *dev,
11657 			      struct drm_file *filp,
11658 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
11659 {
11660 	struct drm_framebuffer *fb;
11661 	struct drm_i915_gem_object *obj;
11662 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
11663 	struct drm_i915_private *i915;
11664 
11665 	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
11666 	if (!obj)
11667 		return ERR_PTR(-ENOENT);
11668 
11669 	/* object is backed with LMEM for discrete */
11670 	i915 = to_i915(obj->base.dev);
11671 	if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) {
11672 		/* object is "remote", not in local memory */
11673 		i915_gem_object_put(obj);
11674 		return ERR_PTR(-EREMOTE);
11675 	}
11676 
11677 	fb = intel_framebuffer_create(obj, &mode_cmd);
11678 	i915_gem_object_put(obj);
11679 
11680 	return fb;
11681 }
11682 
11683 static enum drm_mode_status
11684 intel_mode_valid(struct drm_device *dev,
11685 		 const struct drm_display_mode *mode)
11686 {
11687 	struct drm_i915_private *dev_priv = to_i915(dev);
11688 	int hdisplay_max, htotal_max;
11689 	int vdisplay_max, vtotal_max;
11690 
11691 	/*
11692 	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
11693 	 * of DBLSCAN modes to the output's mode list when they detect
11694 	 * the scaling mode property on the connector. And they don't
11695 	 * ask the kernel to validate those modes in any way until
11696 	 * modeset time at which point the client gets a protocol error.
11697 	 * So in order to not upset those clients we silently ignore the
11698 	 * DBLSCAN flag on such connectors. For other connectors we will
11699 	 * reject modes with the DBLSCAN flag in encoder->compute_config().
11700 	 * And we always reject DBLSCAN modes in connector->mode_valid()
11701 	 * as we never want such modes on the connector's mode list.
11702 	 */
11703 
11704 	if (mode->vscan > 1)
11705 		return MODE_NO_VSCAN;
11706 
11707 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
11708 		return MODE_H_ILLEGAL;
11709 
11710 	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
11711 			   DRM_MODE_FLAG_NCSYNC |
11712 			   DRM_MODE_FLAG_PCSYNC))
11713 		return MODE_HSYNC;
11714 
11715 	if (mode->flags & (DRM_MODE_FLAG_BCAST |
11716 			   DRM_MODE_FLAG_PIXMUX |
11717 			   DRM_MODE_FLAG_CLKDIV2))
11718 		return MODE_BAD;
11719 
11720 	/* Transcoder timing limits */
11721 	if (DISPLAY_VER(dev_priv) >= 11) {
11722 		hdisplay_max = 16384;
11723 		vdisplay_max = 8192;
11724 		htotal_max = 16384;
11725 		vtotal_max = 8192;
11726 	} else if (DISPLAY_VER(dev_priv) >= 9 ||
11727 		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
11728 		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
11729 		vdisplay_max = 4096;
11730 		htotal_max = 8192;
11731 		vtotal_max = 8192;
11732 	} else if (DISPLAY_VER(dev_priv) >= 3) {
11733 		hdisplay_max = 4096;
11734 		vdisplay_max = 4096;
11735 		htotal_max = 8192;
11736 		vtotal_max = 8192;
11737 	} else {
11738 		hdisplay_max = 2048;
11739 		vdisplay_max = 2048;
11740 		htotal_max = 4096;
11741 		vtotal_max = 4096;
11742 	}
11743 
11744 	if (mode->hdisplay > hdisplay_max ||
11745 	    mode->hsync_start > htotal_max ||
11746 	    mode->hsync_end > htotal_max ||
11747 	    mode->htotal > htotal_max)
11748 		return MODE_H_ILLEGAL;
11749 
11750 	if (mode->vdisplay > vdisplay_max ||
11751 	    mode->vsync_start > vtotal_max ||
11752 	    mode->vsync_end > vtotal_max ||
11753 	    mode->vtotal > vtotal_max)
11754 		return MODE_V_ILLEGAL;
11755 
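	/* Minimum active size and blanking period the transcoder can generate */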
11756 	if (DISPLAY_VER(dev_priv) >= 5) {
11757 		if (mode->hdisplay < 64 ||
11758 		    mode->htotal - mode->hdisplay < 32)
11759 			return MODE_H_ILLEGAL;
11760 
11761 		if (mode->vtotal - mode->vdisplay < 5)
11762 			return MODE_V_ILLEGAL;
11763 	} else {
11764 		if (mode->htotal - mode->hdisplay < 32)
11765 			return MODE_H_ILLEGAL;
11766 
11767 		if (mode->vtotal - mode->vdisplay < 3)
11768 			return MODE_V_ILLEGAL;
11769 	}
11770 
11771 	return MODE_OK;
11772 }
11773 
11774 enum drm_mode_status
11775 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
11776 				const struct drm_display_mode *mode,
11777 				bool bigjoiner)
11778 {
11779 	int plane_width_max, plane_height_max;
11780 
11781 	/*
11782 	 * intel_mode_valid() should be
11783 	 * sufficient on older platforms.
11784 	 */
11785 	if (DISPLAY_VER(dev_priv) < 9)
11786 		return MODE_OK;
11787 
11788 	/*
11789 	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
11791 	 * too big for that.
11792 	 */
11793 	if (DISPLAY_VER(dev_priv) >= 11) {
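		/* bigjoiner splits the plane across two pipes, doubling the max width */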
11794 		plane_width_max = 5120 << bigjoiner;
11795 		plane_height_max = 4320;
11796 	} else {
11797 		plane_width_max = 5120;
11798 		plane_height_max = 4096;
11799 	}
11800 
11801 	if (mode->hdisplay > plane_width_max)
11802 		return MODE_H_ILLEGAL;
11803 
11804 	if (mode->vdisplay > plane_height_max)
11805 		return MODE_V_ILLEGAL;
11806 
11807 	return MODE_OK;
11808 }
11809 
11810 static const struct drm_mode_config_funcs intel_mode_funcs = {
11811 	.fb_create = intel_user_framebuffer_create,
11812 	.get_format_info = intel_get_format_info,
11813 	.output_poll_changed = intel_fbdev_output_poll_changed,
11814 	.mode_valid = intel_mode_valid,
11815 	.atomic_check = intel_atomic_check,
11816 	.atomic_commit = intel_atomic_commit,
11817 	.atomic_state_alloc = intel_atomic_state_alloc,
11818 	.atomic_state_clear = intel_atomic_state_clear,
11819 	.atomic_state_free = intel_atomic_state_free,
11820 };
11821 
11822 /**
11823  * intel_init_display_hooks - initialize the display modesetting hooks
11824  * @dev_priv: device private
11825  */
11826 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
11827 {
11828 	if (!HAS_DISPLAY(dev_priv))
11829 		return;
11830 
11831 	intel_init_cdclk_hooks(dev_priv);
11832 	intel_init_audio_hooks(dev_priv);
11833 
11834 	intel_dpll_init_clock_hook(dev_priv);
11835 
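	/* hsw/bdw and all display ver 9+ (i.e. all DDI platforms) use the hsw_* crtc hooks */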
11836 	if (DISPLAY_VER(dev_priv) >= 9) {
11837 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11838 		dev_priv->display.crtc_enable = hsw_crtc_enable;
11839 		dev_priv->display.crtc_disable = hsw_crtc_disable;
11840 	} else if (HAS_DDI(dev_priv)) {
11841 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11842 		dev_priv->display.crtc_enable = hsw_crtc_enable;
11843 		dev_priv->display.crtc_disable = hsw_crtc_disable;
11844 	} else if (HAS_PCH_SPLIT(dev_priv)) {
11845 		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
11846 		dev_priv->display.crtc_enable = ilk_crtc_enable;
11847 		dev_priv->display.crtc_disable = ilk_crtc_disable;
11848 	} else if (IS_CHERRYVIEW(dev_priv) ||
11849 		   IS_VALLEYVIEW(dev_priv)) {
11850 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11851 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
11852 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
11853 	} else {
11854 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11855 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
11856 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
11857 	}
11858 
11859 	intel_fdi_init_hook(dev_priv);
11860 
11861 	if (DISPLAY_VER(dev_priv) >= 9) {
11862 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
11863 		dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
11864 	} else {
11865 		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
11866 		dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
11867 	}
11869 }
11870 
11871 void intel_modeset_init_hw(struct drm_i915_private *i915)
11872 {
11873 	struct intel_cdclk_state *cdclk_state;
11874 
11875 	if (!HAS_DISPLAY(i915))
11876 		return;
11877 
11878 	cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
11879 
11880 	intel_update_cdclk(i915);
11881 	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
11882 	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
11883 }
11884 
11885 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
11886 {
11887 	struct drm_plane *plane;
11888 	struct intel_crtc *crtc;
11889 
11890 	for_each_intel_crtc(state->dev, crtc) {
11891 		struct intel_crtc_state *crtc_state;
11892 
11893 		crtc_state = intel_atomic_get_crtc_state(state, crtc);
11894 		if (IS_ERR(crtc_state))
11895 			return PTR_ERR(crtc_state);
11896 
11897 		if (crtc_state->hw.active) {
11898 			/*
11899 			 * Preserve the inherited flag to avoid
11900 			 * taking the full modeset path.
11901 			 */
11902 			crtc_state->inherited = true;
11903 		}
11904 	}
11905 
11906 	drm_for_each_plane(plane, state->dev) {
11907 		struct drm_plane_state *plane_state;
11908 
11909 		plane_state = drm_atomic_get_plane_state(state, plane);
11910 		if (IS_ERR(plane_state))
11911 			return PTR_ERR(plane_state);
11912 	}
11913 
11914 	return 0;
11915 }
11916 
11917 /*
11918  * Calculate what we think the watermarks should be for the state we've read
11919  * out of the hardware and then immediately program those watermarks so that
11920  * we ensure the hardware settings match our internal state.
11921  *
11922  * We can calculate what we think WM's should be by creating a duplicate of the
11923  * current state (which was constructed during hardware readout) and running it
11924  * through the atomic check code to calculate new watermark values in the
11925  * state object.
11926  */
11927 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
11928 {
11929 	struct drm_atomic_state *state;
11930 	struct intel_atomic_state *intel_state;
11931 	struct intel_crtc *crtc;
11932 	struct intel_crtc_state *crtc_state;
11933 	struct drm_modeset_acquire_ctx ctx;
11934 	int ret;
11935 	int i;
11936 
11937 	/* Only supported on platforms that use atomic watermark design */
11938 	if (!dev_priv->display.optimize_watermarks)
11939 		return;
11940 
11941 	state = drm_atomic_state_alloc(&dev_priv->drm);
11942 	if (drm_WARN_ON(&dev_priv->drm, !state))
11943 		return;
11944 
11945 	intel_state = to_intel_atomic_state(state);
11946 
11947 	drm_modeset_acquire_init(&ctx, 0);
11948 
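	/*
	 * Standard modeset-lock dance: on -EDEADLK we drop all locks
	 * via drm_modeset_backoff() and restart the whole sequence.
	 */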
11949 retry:
11950 	state->acquire_ctx = &ctx;
11951 
11952 	/*
11953 	 * Hardware readout is the only time we don't want to calculate
11954 	 * intermediate watermarks (since we don't trust the current
11955 	 * watermarks).
11956 	 */
11957 	if (!HAS_GMCH(dev_priv))
11958 		intel_state->skip_intermediate_wm = true;
11959 
11960 	ret = sanitize_watermarks_add_affected(state);
11961 	if (ret)
11962 		goto fail;
11963 
11964 	ret = intel_atomic_check(&dev_priv->drm, state);
11965 	if (ret)
11966 		goto fail;
11967 
11968 	/* Write calculated watermark values back */
11969 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
11970 		crtc_state->wm.need_postvbl_update = true;
11971 		dev_priv->display.optimize_watermarks(intel_state, crtc);
11972 
11973 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
11974 	}
11975 
11976 fail:
11977 	if (ret == -EDEADLK) {
11978 		drm_atomic_state_clear(state);
11979 		drm_modeset_backoff(&ctx);
11980 		goto retry;
11981 	}
11982 
11983 	/*
11984 	 * If we fail here, it means that the hardware appears to be
11985 	 * programmed in a way that shouldn't be possible, given our
11986 	 * understanding of watermark requirements.  This might mean a
11987 	 * mistake in the hardware readout code or a mistake in the
11988 	 * watermark calculations for a given platform.  Raise a WARN
11989 	 * so that this is noticeable.
11990 	 *
11991 	 * If this actually happens, we'll have to just leave the
11992 	 * BIOS-programmed watermarks untouched and hope for the best.
11993 	 */
11994 	drm_WARN(&dev_priv->drm, ret,
11995 		 "Could not determine valid watermarks for inherited state\n");
11996 
11997 	drm_atomic_state_put(state);
11998 
11999 	drm_modeset_drop_locks(&ctx);
12000 	drm_modeset_acquire_fini(&ctx);
12001 }
12002 
12003 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
12004 {
12005 	if (IS_IRONLAKE(dev_priv)) {
12006 		u32 fdi_pll_clk =
12007 			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
12008 
12009 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
12010 	} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
12011 		dev_priv->fdi_pll_freq = 270000;
12012 	} else {
12013 		return;
12014 	}
12015 
12016 	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
12017 }
12018 
12019 static int intel_initial_commit(struct drm_device *dev)
12020 {
12021 	struct drm_atomic_state *state = NULL;
12022 	struct drm_modeset_acquire_ctx ctx;
12023 	struct intel_crtc *crtc;
12024 	int ret = 0;
12025 
12026 	state = drm_atomic_state_alloc(dev);
12027 	if (!state)
12028 		return -ENOMEM;
12029 
12030 	drm_modeset_acquire_init(&ctx, 0);
12031 
12032 retry:
12033 	state->acquire_ctx = &ctx;
12034 
12035 	for_each_intel_crtc(dev, crtc) {
12036 		struct intel_crtc_state *crtc_state =
12037 			intel_atomic_get_crtc_state(state, crtc);
12038 
12039 		if (IS_ERR(crtc_state)) {
12040 			ret = PTR_ERR(crtc_state);
12041 			goto out;
12042 		}
12043 
12044 		if (crtc_state->hw.active) {
12045 			struct intel_encoder *encoder;
12046 
12047 			/*
12048 			 * We've not yet detected sink capabilities
			 * (audio, infoframes, etc.) and thus we don't want to
12050 			 * force a full state recomputation yet. We want that to
12051 			 * happen only for the first real commit from userspace.
12052 			 * So preserve the inherited flag for the time being.
12053 			 */
12054 			crtc_state->inherited = true;
12055 
12056 			ret = drm_atomic_add_affected_planes(state, &crtc->base);
12057 			if (ret)
12058 				goto out;
12059 
12060 			/*
12061 			 * FIXME hack to force a LUT update to avoid the
12062 			 * plane update forcing the pipe gamma on without
12063 			 * having a proper LUT loaded. Remove once we
12064 			 * have readout for pipe gamma enable.
12065 			 */
12066 			crtc_state->uapi.color_mgmt_changed = true;
12067 
12068 			for_each_intel_encoder_mask(dev, encoder,
12069 						    crtc_state->uapi.encoder_mask) {
12070 				if (encoder->initial_fastset_check &&
12071 				    !encoder->initial_fastset_check(encoder, crtc_state)) {
12072 					ret = drm_atomic_add_affected_connectors(state,
12073 										 &crtc->base);
12074 					if (ret)
12075 						goto out;
12076 				}
12077 			}
12078 		}
12079 	}
12080 
12081 	ret = drm_atomic_commit(state);
12082 
12083 out:
12084 	if (ret == -EDEADLK) {
12085 		drm_atomic_state_clear(state);
12086 		drm_modeset_backoff(&ctx);
12087 		goto retry;
12088 	}
12089 
12090 	drm_atomic_state_put(state);
12091 
12092 	drm_modeset_drop_locks(&ctx);
12093 	drm_modeset_acquire_fini(&ctx);
12094 
12095 	return ret;
12096 }
12097 
12098 static void intel_mode_config_init(struct drm_i915_private *i915)
12099 {
12100 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
12101 
12102 	drm_mode_config_init(&i915->drm);
12103 	INIT_LIST_HEAD(&i915->global_obj_list);
12104 
12105 	mode_config->min_width = 0;
12106 	mode_config->min_height = 0;
12107 
12108 	mode_config->preferred_depth = 24;
12109 	mode_config->prefer_shadow = 1;
12110 
12111 	mode_config->funcs = &intel_mode_funcs;
12112 
12113 	mode_config->async_page_flip = has_async_flips(i915);
12114 
12115 	/*
12116 	 * Maximum framebuffer dimensions, chosen to match
12117 	 * the maximum render engine surface size on gen4+.
12118 	 */
12119 	if (DISPLAY_VER(i915) >= 7) {
12120 		mode_config->max_width = 16384;
12121 		mode_config->max_height = 16384;
12122 	} else if (DISPLAY_VER(i915) >= 4) {
12123 		mode_config->max_width = 8192;
12124 		mode_config->max_height = 8192;
12125 	} else if (DISPLAY_VER(i915) == 3) {
12126 		mode_config->max_width = 4096;
12127 		mode_config->max_height = 4096;
12128 	} else {
12129 		mode_config->max_width = 2048;
12130 		mode_config->max_height = 2048;
12131 	}
12132 
12133 	if (IS_I845G(i915) || IS_I865G(i915)) {
12134 		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
12135 		mode_config->cursor_height = 1023;
12136 	} else if (IS_I830(i915) || IS_I85X(i915) ||
12137 		   IS_I915G(i915) || IS_I915GM(i915)) {
12138 		mode_config->cursor_width = 64;
12139 		mode_config->cursor_height = 64;
12140 	} else {
12141 		mode_config->cursor_width = 256;
12142 		mode_config->cursor_height = 256;
12143 	}
12144 }
12145 
12146 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
12147 {
12148 	intel_atomic_global_obj_cleanup(i915);
12149 	drm_mode_config_cleanup(&i915->drm);
12150 }
12151 
12152 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
12153 {
12154 	if (plane_config->fb) {
12155 		struct drm_framebuffer *fb = &plane_config->fb->base;
12156 
12157 		/* We may only have the stub and not a full framebuffer */
12158 		if (drm_framebuffer_read_refcount(fb))
12159 			drm_framebuffer_put(fb);
12160 		else
12161 			kfree(fb);
12162 	}
12163 
12164 	if (plane_config->vma)
12165 		i915_vma_put(plane_config->vma);
12166 }
12167 
12168 /* part #1: call before irq install */
12169 int intel_modeset_init_noirq(struct drm_i915_private *i915)
12170 {
12171 	int ret;
12172 
12173 	if (i915_inject_probe_failure(i915))
12174 		return -ENODEV;
12175 
12176 	if (HAS_DISPLAY(i915)) {
12177 		ret = drm_vblank_init(&i915->drm,
12178 				      INTEL_NUM_PIPES(i915));
12179 		if (ret)
12180 			return ret;
12181 	}
12182 
12183 	intel_bios_init(i915);
12184 
12185 	ret = intel_vga_register(i915);
12186 	if (ret)
12187 		goto cleanup_bios;
12188 
12189 	/* FIXME: completely on the wrong abstraction layer */
12190 	intel_power_domains_init_hw(i915, false);
12191 
12192 	if (!HAS_DISPLAY(i915))
12193 		return 0;
12194 
12195 	intel_csr_ucode_init(i915);
12196 
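	/*
	 * Ordered wq for modeset commit work, plus a separate high
	 * priority wq for page flips.
	 */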
12197 	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
12198 	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
12199 					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
12200 
12201 	i915->framestart_delay = 1; /* 1-4 */
12202 
12203 	intel_mode_config_init(i915);
12204 
12205 	ret = intel_cdclk_init(i915);
12206 	if (ret)
12207 		goto cleanup_vga_client_pw_domain_csr;
12208 
12209 	ret = intel_dbuf_init(i915);
12210 	if (ret)
12211 		goto cleanup_vga_client_pw_domain_csr;
12212 
12213 	ret = intel_bw_init(i915);
12214 	if (ret)
12215 		goto cleanup_vga_client_pw_domain_csr;
12216 
12217 	init_llist_head(&i915->atomic_helper.free_list);
12218 	INIT_WORK(&i915->atomic_helper.free_work,
12219 		  intel_atomic_helper_free_state_worker);
12220 
12221 	intel_init_quirks(i915);
12222 
12223 	intel_fbc_init(i915);
12224 
12225 	return 0;
12226 
12227 cleanup_vga_client_pw_domain_csr:
12228 	intel_csr_ucode_fini(i915);
12229 	intel_power_domains_driver_remove(i915);
12230 	intel_vga_unregister(i915);
12231 cleanup_bios:
12232 	intel_bios_driver_remove(i915);
12233 
12234 	return ret;
12235 }
12236 
12237 /* part #2: call after irq install, but before gem init */
12238 int intel_modeset_init_nogem(struct drm_i915_private *i915)
12239 {
12240 	struct drm_device *dev = &i915->drm;
12241 	enum pipe pipe;
12242 	struct intel_crtc *crtc;
12243 	int ret;
12244 
12245 	if (!HAS_DISPLAY(i915))
12246 		return 0;
12247 
12248 	intel_init_pm(i915);
12249 
12250 	intel_panel_sanitize_ssc(i915);
12251 
12252 	intel_pps_setup(i915);
12253 
12254 	intel_gmbus_setup(i915);
12255 
12256 	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
12257 		    INTEL_NUM_PIPES(i915),
12258 		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
12259 
12260 	for_each_pipe(i915, pipe) {
12261 		ret = intel_crtc_init(i915, pipe);
12262 		if (ret) {
12263 			intel_mode_config_cleanup(i915);
12264 			return ret;
12265 		}
12266 	}
12267 
12268 	intel_plane_possible_crtcs_init(i915);
12269 	intel_shared_dpll_init(dev);
12270 	intel_update_fdi_pll_freq(i915);
12271 
12272 	intel_update_czclk(i915);
12273 	intel_modeset_init_hw(i915);
12274 	intel_dpll_update_ref_clks(i915);
12275 
12276 	intel_hdcp_component_init(i915);
12277 
12278 	if (i915->max_cdclk_freq == 0)
12279 		intel_update_max_cdclk(i915);
12280 
12281 	/*
12282 	 * If the platform has HTI, we need to find out whether it has reserved
12283 	 * any display resources before we create our display outputs.
12284 	 */
12285 	if (INTEL_INFO(i915)->display.has_hti)
12286 		i915->hti_state = intel_de_read(i915, HDPORT_STATE);
12287 
12288 	/* Just disable it once at startup */
12289 	intel_vga_disable(i915);
12290 	intel_setup_outputs(i915);
12291 
12292 	drm_modeset_lock_all(dev);
12293 	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
12294 	drm_modeset_unlock_all(dev);
12295 
12296 	for_each_intel_crtc(dev, crtc) {
12297 		struct intel_initial_plane_config plane_config = {};
12298 
12299 		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
12300 			continue;
12301 
12302 		/*
12303 		 * Note that reserving the BIOS fb up front prevents us
12304 		 * from stuffing other stolen allocations like the ring
		 * on top.  This avoids some ugliness at boot time, and
12306 		 * can even allow for smooth boot transitions if the BIOS
12307 		 * fb is large enough for the active pipe configuration.
12308 		 */
12309 		i915->display.get_initial_plane_config(crtc, &plane_config);
12310 
12311 		/*
12312 		 * If the fb is shared between multiple heads, we'll
12313 		 * just get the first one.
12314 		 */
12315 		intel_find_initial_plane_obj(crtc, &plane_config);
12316 
12317 		plane_config_fini(&plane_config);
12318 	}
12319 
12320 	/*
12321 	 * Make sure hardware watermarks really match the state we read out.
12322 	 * Note that we need to do this after reconstructing the BIOS fb's
12323 	 * since the watermark calculation done here will use pstate->fb.
12324 	 */
12325 	if (!HAS_GMCH(i915))
12326 		sanitize_watermarks(i915);
12327 
12328 	return 0;
12329 }
12330 
12331 /* part #3: call after gem init */
12332 int intel_modeset_init(struct drm_i915_private *i915)
12333 {
12334 	int ret;
12335 
12336 	if (!HAS_DISPLAY(i915))
12337 		return 0;
12338 
12339 	/*
12340 	 * Force all active planes to recompute their states. So that on
12341 	 * mode_setcrtc after probe, all the intel_plane_state variables
12342 	 * are already calculated and there is no assert_plane warnings
12343 	 * during bootup.
12344 	 */
12345 	ret = intel_initial_commit(&i915->drm);
12346 	if (ret)
12347 		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
12348 
12349 	intel_overlay_setup(i915);
12350 
12351 	ret = intel_fbdev_init(&i915->drm);
12352 	if (ret)
12353 		return ret;
12354 
12355 	/* Only enable hotplug handling once the fbdev is fully set up. */
12356 	intel_hpd_init(i915);
12357 	intel_hpd_poll_disable(i915);
12358 
12359 	intel_init_ipc(i915);
12360 
12361 	return 0;
12362 }
12363 
12364 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12365 {
12366 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12367 	/* 640x480@60Hz, ~25175 kHz */
12368 	struct dpll clock = {
12369 		.m1 = 18,
12370 		.m2 = 7,
12371 		.p1 = 13,
12372 		.p2 = 4,
12373 		.n = 2,
12374 	};
12375 	u32 dpll, fp;
12376 	int i;
12377 
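	/*
	 * Per i9xx_calc_dpll_params(): m = 5 * (m1 + 2) + (m2 + 2) = 109,
	 * vco = 48000 * m / (n + 2) = 1308000 kHz, and
	 * dot = vco / (p1 * p2) = 1308000 / 52 = 25154 kHz (rounded),
	 * slightly short of the nominal 25175 kHz pixel clock.
	 */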
12378 	drm_WARN_ON(&dev_priv->drm,
12379 		    i9xx_calc_dpll_params(48000, &clock) != 25154);
12380 
12381 	drm_dbg_kms(&dev_priv->drm,
12382 		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
12383 		    pipe_name(pipe), clock.vco, clock.dot);
12384 
12385 	fp = i9xx_dpll_compute_fp(&clock);
12386 	dpll = DPLL_DVO_2X_MODE |
12387 		DPLL_VGA_MODE_DIS |
12388 		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
12389 		PLL_P2_DIVIDE_BY_4 |
12390 		PLL_REF_INPUT_DREFCLK |
12391 		DPLL_VCO_ENABLE;
12392 
12393 	intel_de_write(dev_priv, FP0(pipe), fp);
12394 	intel_de_write(dev_priv, FP1(pipe), fp);
12395 
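	/*
	 * Standard VESA 640x480@60 timings; each register packs
	 * (start - 1) in the low half and (end - 1) in the high half.
	 */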
12396 	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
12397 	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
12398 	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
12399 	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
12400 	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
12401 	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
12402 	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
12403 
12404 	/*
12405 	 * Apparently we need to have VGA mode enabled prior to changing
12406 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
12407 	 * dividers, even though the register value does change.
12408 	 */
12409 	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
12410 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12411 
12412 	/* Wait for the clocks to stabilize. */
12413 	intel_de_posting_read(dev_priv, DPLL(pipe));
12414 	udelay(150);
12415 
12416 	/* The pixel multiplier can only be updated once the
12417 	 * DPLL is enabled and the clocks are stable.
12418 	 *
12419 	 * So write it again.
12420 	 */
12421 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12422 
12423 	/* We do this three times for luck */
12424 	for (i = 0; i < 3 ; i++) {
12425 		intel_de_write(dev_priv, DPLL(pipe), dpll);
12426 		intel_de_posting_read(dev_priv, DPLL(pipe));
12427 		udelay(150); /* wait for warmup */
12428 	}
12429 
12430 	intel_de_write(dev_priv, PIPECONF(pipe),
12431 		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
12432 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12433 
12434 	intel_wait_for_pipe_scanline_moving(crtc);
12435 }
12436 
12437 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12438 {
12439 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12440 
12441 	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
12442 		    pipe_name(pipe));
12443 
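	/* All planes and cursors should already be off before the pipe goes down. */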
12444 	drm_WARN_ON(&dev_priv->drm,
12445 		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
12446 		    DISPLAY_PLANE_ENABLE);
12447 	drm_WARN_ON(&dev_priv->drm,
12448 		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
12449 		    DISPLAY_PLANE_ENABLE);
12450 	drm_WARN_ON(&dev_priv->drm,
12451 		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
12452 		    DISPLAY_PLANE_ENABLE);
12453 	drm_WARN_ON(&dev_priv->drm,
12454 		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
12455 	drm_WARN_ON(&dev_priv->drm,
12456 		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
12457 
12458 	intel_de_write(dev_priv, PIPECONF(pipe), 0);
12459 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12460 
12461 	intel_wait_for_pipe_scanline_stopped(crtc);
12462 
12463 	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
12464 	intel_de_posting_read(dev_priv, DPLL(pipe));
12465 }
12466 
12467 static void
12468 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
12469 {
12470 	struct intel_crtc *crtc;
12471 
12472 	if (DISPLAY_VER(dev_priv) >= 4)
12473 		return;
12474 
12475 	for_each_intel_crtc(&dev_priv->drm, crtc) {
12476 		struct intel_plane *plane =
12477 			to_intel_plane(crtc->base.primary);
12478 		struct intel_crtc *plane_crtc;
12479 		enum pipe pipe;
12480 
12481 		if (!plane->get_hw_state(plane, &pipe))
12482 			continue;
12483 
12484 		if (pipe == crtc->pipe)
12485 			continue;
12486 
12487 		drm_dbg_kms(&dev_priv->drm,
12488 			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
12489 			    plane->base.base.id, plane->base.name);
12490 
12491 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12492 		intel_plane_disable_noatomic(plane_crtc, plane);
12493 	}
12494 }
12495 
12496 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
12497 {
12498 	struct drm_device *dev = crtc->base.dev;
12499 	struct intel_encoder *encoder;
12500 
12501 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
12502 		return true;
12503 
12504 	return false;
12505 }
12506 
12507 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
12508 {
12509 	struct drm_device *dev = encoder->base.dev;
12510 	struct intel_connector *connector;
12511 
12512 	for_each_connector_on_encoder(dev, &encoder->base, connector)
12513 		return connector;
12514 
12515 	return NULL;
12516 }
12517 
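/*
 * IBX/CPT have a PCH transcoder per pipe, whereas LPT-H only has
 * PCH transcoder A.
 */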
static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
			       enum pipe pch_transcoder)
12520 {
12521 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12522 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12523 }
12524 
12525 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
12526 {
12527 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12528 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12529 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
12530 
12531 	if (DISPLAY_VER(dev_priv) >= 9 ||
12532 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12533 		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
12534 		u32 val;
12535 
12536 		if (transcoder_is_dsi(cpu_transcoder))
12537 			return;
12538 
12539 		val = intel_de_read(dev_priv, reg);
12540 		val &= ~HSW_FRAME_START_DELAY_MASK;
12541 		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12542 		intel_de_write(dev_priv, reg, val);
12543 	} else {
12544 		i915_reg_t reg = PIPECONF(cpu_transcoder);
12545 		u32 val;
12546 
12547 		val = intel_de_read(dev_priv, reg);
12548 		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
12549 		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12550 		intel_de_write(dev_priv, reg, val);
12551 	}
12552 
12553 	if (!crtc_state->has_pch_encoder)
12554 		return;
12555 
12556 	if (HAS_PCH_IBX(dev_priv)) {
12557 		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
12558 		u32 val;
12559 
12560 		val = intel_de_read(dev_priv, reg);
12561 		val &= ~TRANS_FRAME_START_DELAY_MASK;
12562 		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12563 		intel_de_write(dev_priv, reg, val);
12564 	} else {
12565 		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
12566 		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
12567 		u32 val;
12568 
12569 		val = intel_de_read(dev_priv, reg);
12570 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
12571 		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12572 		intel_de_write(dev_priv, reg, val);
12573 	}
12574 }
12575 
12576 static void intel_sanitize_crtc(struct intel_crtc *crtc,
12577 				struct drm_modeset_acquire_ctx *ctx)
12578 {
12579 	struct drm_device *dev = crtc->base.dev;
12580 	struct drm_i915_private *dev_priv = to_i915(dev);
12581 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
12582 
12583 	if (crtc_state->hw.active) {
12584 		struct intel_plane *plane;
12585 
12586 		/* Clear any frame start delays used for debugging left by the BIOS */
12587 		intel_sanitize_frame_start_delay(crtc_state);
12588 
12589 		/* Disable everything but the primary plane */
12590 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
12591 			const struct intel_plane_state *plane_state =
12592 				to_intel_plane_state(plane->base.state);
12593 
12594 			if (plane_state->uapi.visible &&
12595 			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
12596 				intel_plane_disable_noatomic(crtc, plane);
12597 		}
12598 
12599 		/*
12600 		 * Disable any background color set by the BIOS, but enable the
12601 		 * gamma and CSC to match how we program our planes.
12602 		 */
12603 		if (DISPLAY_VER(dev_priv) >= 9)
12604 			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
12605 				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
12606 	}
12607 
12608 	/* Adjust the state of the output pipe according to whether we
12609 	 * have active connectors/encoders. */
12610 	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
12611 	    !crtc_state->bigjoiner_slave)
12612 		intel_crtc_disable_noatomic(crtc, ctx);
12613 
12614 	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
12615 		/*
12616 		 * We start out with underrun reporting disabled to avoid races.
12617 		 * For correct bookkeeping mark this on active crtcs.
12618 		 *
		 * Also on gmch platforms we don't have any hardware bits to
12620 		 * disable the underrun reporting. Which means we need to start
12621 		 * out with underrun reporting disabled also on inactive pipes,
12622 		 * since otherwise we'll complain about the garbage we read when
12623 		 * e.g. coming up after runtime pm.
12624 		 *
12625 		 * No protection against concurrent access is required - at
12626 		 * worst a fifo underrun happens which also sets this to false.
12627 		 */
12628 		crtc->cpu_fifo_underrun_disabled = true;
12629 		/*
		 * We track the PCH transcoder underrun reporting state
12631 		 * within the crtc. With crtc for pipe A housing the underrun
12632 		 * reporting state for PCH transcoder A, crtc for pipe B housing
12633 		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
12634 		 * and marking underrun reporting as disabled for the non-existing
12635 		 * PCH transcoders B and C would prevent enabling the south
12636 		 * error interrupt (see cpt_can_enable_serr_int()).
12637 		 */
		if (has_pch_transcoder(dev_priv, crtc->pipe))
12639 			crtc->pch_fifo_underrun_disabled = true;
12640 	}
12641 }
12642 
12643 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
12644 {
12645 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12646 
12647 	/*
12648 	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
	 * the hardware when a high res display is plugged in. DPLL P
12650 	 * divider is zero, and the pipe timings are bonkers. We'll
12651 	 * try to disable everything in that case.
12652 	 *
12653 	 * FIXME would be nice to be able to sanitize this state
12654 	 * without several WARNs, but for now let's take the easy
12655 	 * road.
12656 	 */
12657 	return IS_SANDYBRIDGE(dev_priv) &&
12658 		crtc_state->hw.active &&
12659 		crtc_state->shared_dpll &&
12660 		crtc_state->port_clock == 0;
12661 }
12662 
12663 static void intel_sanitize_encoder(struct intel_encoder *encoder)
12664 {
12665 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12666 	struct intel_connector *connector;
12667 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
12668 	struct intel_crtc_state *crtc_state = crtc ?
12669 		to_intel_crtc_state(crtc->base.state) : NULL;
12670 
12671 	/* We need to check both for a crtc link (meaning that the
12672 	 * encoder is active and trying to read from a pipe) and the
12673 	 * pipe itself being active. */
12674 	bool has_active_crtc = crtc_state &&
12675 		crtc_state->hw.active;
12676 
12677 	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
12678 		drm_dbg_kms(&dev_priv->drm,
12679 			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
12680 			    pipe_name(crtc->pipe));
12681 		has_active_crtc = false;
12682 	}
12683 
12684 	connector = intel_encoder_find_connector(encoder);
12685 	if (connector && !has_active_crtc) {
12686 		drm_dbg_kms(&dev_priv->drm,
12687 			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12688 			    encoder->base.base.id,
12689 			    encoder->base.name);
12690 
12691 		/* Connector is active, but has no active pipe. This is
12692 		 * fallout from our resume register restoring. Disable
12693 		 * the encoder manually again. */
12694 		if (crtc_state) {
12695 			struct drm_encoder *best_encoder;
12696 
12697 			drm_dbg_kms(&dev_priv->drm,
12698 				    "[ENCODER:%d:%s] manually disabled\n",
12699 				    encoder->base.base.id,
12700 				    encoder->base.name);
12701 
12702 			/* avoid oopsing in case the hooks consult best_encoder */
12703 			best_encoder = connector->base.state->best_encoder;
12704 			connector->base.state->best_encoder = &encoder->base;
12705 
12706 			/* FIXME NULL atomic state passed! */
12707 			if (encoder->disable)
12708 				encoder->disable(NULL, encoder, crtc_state,
12709 						 connector->base.state);
12710 			if (encoder->post_disable)
12711 				encoder->post_disable(NULL, encoder, crtc_state,
12712 						      connector->base.state);
12713 
12714 			connector->base.state->best_encoder = best_encoder;
12715 		}
12716 		encoder->base.crtc = NULL;
12717 
12718 		/* Inconsistent output/port/pipe state happens presumably due to
12719 		 * a bug in one of the get_hw_state functions. Or someplace else
12720 		 * in our code, like the register restore mess on resume. Clamp
12721 		 * things to off as a safer default. */
12722 
12723 		connector->base.dpms = DRM_MODE_DPMS_OFF;
12724 		connector->base.encoder = NULL;
12725 	}
12726 
12727 	/* notify opregion of the sanitized encoder state */
12728 	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
12729 
12730 	if (HAS_DDI(dev_priv))
12731 		intel_ddi_sanitize_encoder_pll_mapping(encoder);
12732 }
12733 
12734 /* FIXME read out full plane state for all planes */
12735 static void readout_plane_state(struct drm_i915_private *dev_priv)
12736 {
12737 	struct intel_plane *plane;
12738 	struct intel_crtc *crtc;
12739 
12740 	for_each_intel_plane(&dev_priv->drm, plane) {
12741 		struct intel_plane_state *plane_state =
12742 			to_intel_plane_state(plane->base.state);
12743 		struct intel_crtc_state *crtc_state;
12744 		enum pipe pipe = PIPE_A;
12745 		bool visible;
12746 
12747 		visible = plane->get_hw_state(plane, &pipe);
12748 
12749 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12750 		crtc_state = to_intel_crtc_state(crtc->base.state);
12751 
12752 		intel_set_plane_visible(crtc_state, plane_state, visible);
12753 
12754 		drm_dbg_kms(&dev_priv->drm,
12755 			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
12756 			    plane->base.base.id, plane->base.name,
12757 			    enableddisabled(visible), pipe_name(pipe));
12758 	}
12759 
12760 	for_each_intel_crtc(&dev_priv->drm, crtc) {
12761 		struct intel_crtc_state *crtc_state =
12762 			to_intel_crtc_state(crtc->base.state);
12763 
12764 		fixup_plane_bitmasks(crtc_state);
12765 	}
12766 }
12767 
12768 static void intel_modeset_readout_hw_state(struct drm_device *dev)
12769 {
12770 	struct drm_i915_private *dev_priv = to_i915(dev);
12771 	struct intel_cdclk_state *cdclk_state =
12772 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
12773 	struct intel_dbuf_state *dbuf_state =
12774 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
12775 	enum pipe pipe;
12776 	struct intel_crtc *crtc;
12777 	struct intel_encoder *encoder;
12778 	struct intel_connector *connector;
12779 	struct drm_connector_list_iter conn_iter;
12780 	u8 active_pipes = 0;
12781 
12782 	for_each_intel_crtc(dev, crtc) {
12783 		struct intel_crtc_state *crtc_state =
12784 			to_intel_crtc_state(crtc->base.state);
12785 
12786 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
12787 		intel_crtc_free_hw_state(crtc_state);
12788 		intel_crtc_state_reset(crtc_state, crtc);
12789 
12790 		intel_crtc_get_pipe_config(crtc_state);
12791 
12792 		crtc_state->hw.enable = crtc_state->hw.active;
12793 
12794 		crtc->base.enabled = crtc_state->hw.enable;
12795 		crtc->active = crtc_state->hw.active;
12796 
12797 		if (crtc_state->hw.active)
12798 			active_pipes |= BIT(crtc->pipe);
12799 
12800 		drm_dbg_kms(&dev_priv->drm,
12801 			    "[CRTC:%d:%s] hw state readout: %s\n",
12802 			    crtc->base.base.id, crtc->base.name,
12803 			    enableddisabled(crtc_state->hw.active));
12804 	}
12805 
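	/* Seed the cdclk and dbuf global state with the readout results. */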
12806 	dev_priv->active_pipes = cdclk_state->active_pipes =
12807 		dbuf_state->active_pipes = active_pipes;
12808 
12809 	readout_plane_state(dev_priv);
12810 
12811 	for_each_intel_encoder(dev, encoder) {
		pipe = PIPE_A;
12813 
12814 		if (encoder->get_hw_state(encoder, &pipe)) {
12815 			struct intel_crtc_state *crtc_state;
12816 
12817 			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12818 			crtc_state = to_intel_crtc_state(crtc->base.state);
12819 
12820 			encoder->base.crtc = &crtc->base;
12821 			intel_encoder_get_config(encoder, crtc_state);
12822 			if (encoder->sync_state)
12823 				encoder->sync_state(encoder, crtc_state);
12824 
12825 			/* read out to slave crtc as well for bigjoiner */
12826 			if (crtc_state->bigjoiner) {
				/* the encoder should be linked to the bigjoiner master */
12828 				WARN_ON(crtc_state->bigjoiner_slave);
12829 
12830 				crtc = crtc_state->bigjoiner_linked_crtc;
12831 				crtc_state = to_intel_crtc_state(crtc->base.state);
12832 				intel_encoder_get_config(encoder, crtc_state);
12833 			}
12834 		} else {
12835 			encoder->base.crtc = NULL;
12836 		}
12837 
12838 		drm_dbg_kms(&dev_priv->drm,
12839 			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
12840 			    encoder->base.base.id, encoder->base.name,
12841 			    enableddisabled(encoder->base.crtc),
12842 			    pipe_name(pipe));
12843 	}
12844 
12845 	intel_dpll_readout_hw_state(dev_priv);
12846 
12847 	drm_connector_list_iter_begin(dev, &conn_iter);
12848 	for_each_intel_connector_iter(connector, &conn_iter) {
12849 		if (connector->get_hw_state(connector)) {
12850 			struct intel_crtc_state *crtc_state;
12851 			struct intel_crtc *crtc;
12852 
12853 			connector->base.dpms = DRM_MODE_DPMS_ON;
12854 
12855 			encoder = intel_attached_encoder(connector);
12856 			connector->base.encoder = &encoder->base;
12857 
12858 			crtc = to_intel_crtc(encoder->base.crtc);
12859 			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
12860 
12861 			if (crtc_state && crtc_state->hw.active) {
12862 				/*
12863 				 * This has to be done during hardware readout
12864 				 * because anything calling .crtc_disable may
12865 				 * rely on the connector_mask being accurate.
12866 				 */
12867 				crtc_state->uapi.connector_mask |=
12868 					drm_connector_mask(&connector->base);
12869 				crtc_state->uapi.encoder_mask |=
12870 					drm_encoder_mask(&encoder->base);
12871 			}
12872 		} else {
12873 			connector->base.dpms = DRM_MODE_DPMS_OFF;
12874 			connector->base.encoder = NULL;
12875 		}
12876 		drm_dbg_kms(&dev_priv->drm,
12877 			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
12878 			    connector->base.base.id, connector->base.name,
12879 			    enableddisabled(connector->base.encoder));
12880 	}
12881 	drm_connector_list_iter_end(&conn_iter);
12882 
12883 	for_each_intel_crtc(dev, crtc) {
12884 		struct intel_bw_state *bw_state =
12885 			to_intel_bw_state(dev_priv->bw_obj.state);
12886 		struct intel_crtc_state *crtc_state =
12887 			to_intel_crtc_state(crtc->base.state);
12888 		struct intel_plane *plane;
12889 		int min_cdclk = 0;
12890 
12891 		if (crtc_state->bigjoiner_slave)
12892 			continue;
12893 
12894 		if (crtc_state->hw.active) {
12895 			/*
12896 			 * The initial mode needs to be set in order to keep
12897 			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we fill one in from the hw readout below.
12899 			 *
12900 			 * But we don't set all the derived state fully, hence
12901 			 * set a flag to indicate that a full recalculation is
12902 			 * needed on the next commit.
12903 			 */
12904 			crtc_state->inherited = true;
12905 
12906 			intel_crtc_update_active_timings(crtc_state);
12907 
12908 			intel_crtc_copy_hw_to_uapi_state(crtc_state);
12909 		}
12910 
12911 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
12912 			const struct intel_plane_state *plane_state =
12913 				to_intel_plane_state(plane->base.state);
12914 
12915 			/*
12916 			 * FIXME don't have the fb yet, so can't
12917 			 * use intel_plane_data_rate() :(
12918 			 */
12919 			if (plane_state->uapi.visible)
12920 				crtc_state->data_rate[plane->id] =
12921 					4 * crtc_state->pixel_rate;
12922 			/*
12923 			 * FIXME don't have the fb yet, so can't
12924 			 * use plane->min_cdclk() :(
12925 			 */
12926 			if (plane_state->uapi.visible && plane->min_cdclk) {
12927 				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
12928 					crtc_state->min_cdclk[plane->id] =
12929 						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
12930 				else
12931 					crtc_state->min_cdclk[plane->id] =
12932 						crtc_state->pixel_rate;
12933 			}
12934 			drm_dbg_kms(&dev_priv->drm,
12935 				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
12936 				    plane->base.base.id, plane->base.name,
12937 				    crtc_state->min_cdclk[plane->id]);
12938 		}
12939 
12940 		if (crtc_state->hw.active) {
12941 			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
12942 			if (drm_WARN_ON(dev, min_cdclk < 0))
12943 				min_cdclk = 0;
12944 		}
12945 
12946 		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
12947 		cdclk_state->min_voltage_level[crtc->pipe] =
12948 			crtc_state->min_voltage_level;
12949 
12950 		intel_bw_crtc_update(bw_state, crtc_state);
12951 
12952 		intel_pipe_config_sanity_check(dev_priv, crtc_state);
12953 
12954 		/* discard our incomplete slave state, copy it from master */
12955 		if (crtc_state->bigjoiner && crtc_state->hw.active) {
12956 			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
12957 			struct intel_crtc_state *slave_crtc_state =
12958 				to_intel_crtc_state(slave->base.state);
12959 
12960 			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
12961 			slave->base.mode = crtc->base.mode;
12962 
12963 			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
12964 			cdclk_state->min_voltage_level[slave->pipe] =
12965 				crtc_state->min_voltage_level;
12966 
12967 			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
12968 				const struct intel_plane_state *plane_state =
12969 					to_intel_plane_state(plane->base.state);
12970 
12971 				/*
12972 				 * FIXME don't have the fb yet, so can't
12973 				 * use intel_plane_data_rate() :(
12974 				 */
12975 				if (plane_state->uapi.visible)
12976 					crtc_state->data_rate[plane->id] =
12977 						4 * crtc_state->pixel_rate;
12978 				else
12979 					crtc_state->data_rate[plane->id] = 0;
12980 			}
12981 
12982 			intel_bw_crtc_update(bw_state, slave_crtc_state);
12983 			drm_calc_timestamping_constants(&slave->base,
12984 							&slave_crtc_state->hw.adjusted_mode);
12985 		}
12986 	}
12987 }
12988 
12989 static void
12990 get_encoder_power_domains(struct drm_i915_private *dev_priv)
12991 {
12992 	struct intel_encoder *encoder;
12993 
12994 	for_each_intel_encoder(&dev_priv->drm, encoder) {
12995 		struct intel_crtc_state *crtc_state;
12996 
12997 		if (!encoder->get_power_domains)
12998 			continue;
12999 
13000 		/*
13001 		 * MST-primary and inactive encoders don't have a crtc state
13002 		 * and neither of these require any power domain references.
13003 		 */
13004 		if (!encoder->base.crtc)
13005 			continue;
13006 
13007 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
13008 		encoder->get_power_domains(encoder, crtc_state);
13009 	}
13010 }
13011 
13012 static void intel_early_display_was(struct drm_i915_private *dev_priv)
13013 {
13014 	/*
13015 	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
13016 	 * Also known as Wa_14010480278.
13017 	 */
13018 	if (IS_DISPLAY_VER(dev_priv, 10, 12))
13019 		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
13020 			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
13021 
13022 	if (IS_HASWELL(dev_priv)) {
13023 		/*
13024 		 * WaRsPkgCStateDisplayPMReq:hsw
13025 		 * System hang if this isn't done before disabling all planes!
13026 		 */
13027 		intel_de_write(dev_priv, CHICKEN_PAR1_1,
13028 			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
13029 	}
13030 
13031 	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
13032 		/* Display WA #1142:kbl,cfl,cml */
13033 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
13034 			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
13035 		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
13036 			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
13037 			     KBL_ARB_FILL_SPARE_14);
13038 	}
13039 }
13040 
13041 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
13042 				       enum port port, i915_reg_t hdmi_reg)
13043 {
13044 	u32 val = intel_de_read(dev_priv, hdmi_reg);
13045 
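	/*
	 * Leave enabled ports alone (see ibx_sanitize_pch_ports()), and
	 * ports already selecting transcoder A need no fixup.
	 */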
13046 	if (val & SDVO_ENABLE ||
13047 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
13048 		return;
13049 
13050 	drm_dbg_kms(&dev_priv->drm,
13051 		    "Sanitizing transcoder select for HDMI %c\n",
13052 		    port_name(port));
13053 
13054 	val &= ~SDVO_PIPE_SEL_MASK;
13055 	val |= SDVO_PIPE_SEL(PIPE_A);
13056 
13057 	intel_de_write(dev_priv, hdmi_reg, val);
13058 }
13059 
13060 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
13061 				     enum port port, i915_reg_t dp_reg)
13062 {
13063 	u32 val = intel_de_read(dev_priv, dp_reg);
13064 
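	/* Same policy as the HDMI variant above. */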
13065 	if (val & DP_PORT_EN ||
13066 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
13067 		return;
13068 
13069 	drm_dbg_kms(&dev_priv->drm,
13070 		    "Sanitizing transcoder select for DP %c\n",
13071 		    port_name(port));
13072 
13073 	val &= ~DP_PIPE_SEL_MASK;
13074 	val |= DP_PIPE_SEL(PIPE_A);
13075 
13076 	intel_de_write(dev_priv, dp_reg, val);
13077 }
13078 
13079 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
13080 {
13081 	/*
13082 	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
13084 	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
13085 	 * Sanitize the transcoder select bits to prevent that. We
13086 	 * assume that the BIOS never actually enabled the port,
13087 	 * because if it did we'd actually have to toggle the port
13088 	 * on and back off to make the transcoder A select stick
13089 	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
13090 	 * intel_disable_sdvo()).
13091 	 */
13092 	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
13093 	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
13094 	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
13095 
13096 	/* PCH SDVOB multiplex with HDMIB */
13097 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
13098 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
13099 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
13100 }
13101 
/*
 * Scan out the current hw modeset state and sanitize it to match the
 * driver's expectations.
 */
13105 static void
13106 intel_modeset_setup_hw_state(struct drm_device *dev,
13107 			     struct drm_modeset_acquire_ctx *ctx)
13108 {
13109 	struct drm_i915_private *dev_priv = to_i915(dev);
13110 	struct intel_encoder *encoder;
13111 	struct intel_crtc *crtc;
13112 	intel_wakeref_t wakeref;
13113 
13114 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
13115 
13116 	intel_early_display_was(dev_priv);
13117 	intel_modeset_readout_hw_state(dev);
13118 
13119 	/* HW state is read out, now we need to sanitize this mess. */
13120 
13121 	/* Sanitize the TypeC port mode upfront, encoders depend on this */
13122 	for_each_intel_encoder(dev, encoder) {
13123 		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
13124 
13125 		/* We need to sanitize only the MST primary port. */
13126 		if (encoder->type != INTEL_OUTPUT_DP_MST &&
13127 		    intel_phy_is_tc(dev_priv, phy))
13128 			intel_tc_port_sanitize(enc_to_dig_port(encoder));
13129 	}
13130 
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

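	/* Sync the connector atomic state with the now-sanitized hw state. */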
	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

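	/*
	 * Read out the watermark state the firmware left behind; on
	 * g4x and vlv/chv it may also need sanitizing.
	 */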
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

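	/*
	 * Grab the power domains each active pipe needs; nothing
	 * should be left over to drop here, hence the WARN.
	 */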
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}

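/*
 * Restore the atomic state (and thus the display configuration)
 * saved before suspend, retaking all modeset locks with full
 * deadlock backoff.
 */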
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

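	/*
	 * Standard drm_modeset_lock deadlock-avoidance dance: retry
	 * taking all the locks until we succeed, backing off whenever
	 * the acquire context reports -EDEADLK.
	 */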
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
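		/*
		 * modeset_retry_work is not initialized for every
		 * connector; ->func is only non-NULL once INIT_WORK()
		 * has run, so use it to tell the two cases apart.
		 */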
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

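	/*
	 * Flush pending commits while irqs are still installed,
	 * presumably because the queued work may need vblank or other
	 * interrupts in order to complete.
	 */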
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}

/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * The MST topology needs to be suspended so we don't have any calls
	 * to fbdev after it's finalized. MST will be destroyed later as part
	 * of drm_mode_config_cleanup().
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}

/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
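	/* Release the CSR (DMC) firmware. */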
	intel_csr_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}

void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (otherwise we get a ghost
	 * "connected" connector status), e.g. VGA on gm45. Hence
	 * we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async
	 * config cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}

void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), flush the
	 * hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}