xref: /openbmc/linux/drivers/gpu/drm/i915/display/skl_watermark.c (revision 25ebbc57ca56df3cf9149e9da6b1d3169c8487db)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <drm/drm_blend.h>
7 
8 #include "i915_drv.h"
9 #include "i915_fixed.h"
10 #include "i915_reg.h"
11 #include "i9xx_wm.h"
12 #include "intel_atomic.h"
13 #include "intel_atomic_plane.h"
14 #include "intel_bw.h"
15 #include "intel_de.h"
16 #include "intel_display.h"
17 #include "intel_display_power.h"
18 #include "intel_display_types.h"
19 #include "intel_fb.h"
20 #include "intel_pcode.h"
21 #include "intel_wm.h"
22 #include "skl_watermark.h"
23 
24 static void skl_sagv_disable(struct drm_i915_private *i915);
25 
26 /* Stores plane specific WM parameters */
27 struct skl_wm_params {
28 	bool x_tiled, y_tiled;
29 	bool rc_surface;
30 	bool is_planar;
31 	u32 width;
32 	u8 cpp;
33 	u32 plane_pixel_rate;
34 	u32 y_min_scanlines;
35 	u32 plane_bytes_per_line;
36 	uint_fixed_16_16_t plane_blocks_per_line;
37 	uint_fixed_16_16_t y_tile_minimum;
38 	u32 linetime_us;
39 	u32 dbuf_block_size;
40 };
41 
42 u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
43 {
44 	u8 enabled_slices = 0;
45 	enum dbuf_slice slice;
46 
47 	for_each_dbuf_slice(i915, slice) {
48 		if (intel_de_read(i915, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
49 			enabled_slices |= BIT(slice);
50 	}
51 
52 	return enabled_slices;
53 }
54 
55 /*
56  * FIXME: We still don't have the proper code to detect whether we need to apply the WA,
57  * so assume we'll always need it in order to avoid underruns.
58  */
59 static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
60 {
61 	return DISPLAY_VER(i915) == 9;
62 }
63 
64 static bool
65 intel_has_sagv(struct drm_i915_private *i915)
66 {
67 	return HAS_SAGV(i915) &&
68 		i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
69 }
70 
71 static u32
72 intel_sagv_block_time(struct drm_i915_private *i915)
73 {
74 	if (DISPLAY_VER(i915) >= 14) {
75 		u32 val;
76 
77 		val = intel_de_read(i915, MTL_LATENCY_SAGV);
78 
79 		return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
80 	} else if (DISPLAY_VER(i915) >= 12) {
81 		u32 val = 0;
82 		int ret;
83 
84 		ret = snb_pcode_read(&i915->uncore,
85 				     GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
86 				     &val, NULL);
87 		if (ret) {
88 			drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
89 			return 0;
90 		}
91 
92 		return val;
93 	} else if (DISPLAY_VER(i915) == 11) {
94 		return 10;
95 	} else if (HAS_SAGV(i915)) {
96 		return 30;
97 	} else {
98 		return 0;
99 	}
100 }
101 
102 static void intel_sagv_init(struct drm_i915_private *i915)
103 {
104 	if (!HAS_SAGV(i915))
105 		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
106 
107 	/*
108 	 * Probe to see if we have working SAGV control.
109 	 * For icl+ this was already determined by intel_bw_init_hw().
110 	 */
111 	if (DISPLAY_VER(i915) < 11)
112 		skl_sagv_disable(i915);
113 
114 	drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
115 
116 	i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
117 
118 	drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
119 		    str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
120 
121 	/* avoid overflow when adding with wm0 latency/etc. */
122 	if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
123 		     "Excessive SAGV block time %u, ignoring\n",
124 		     i915->display.sagv.block_time_us))
125 		i915->display.sagv.block_time_us = 0;
126 
127 	if (!intel_has_sagv(i915))
128 		i915->display.sagv.block_time_us = 0;
129 }
130 
131 /*
132  * SAGV dynamically adjusts the system agent voltage and clock frequencies
133  * depending on power and performance requirements. The display engine access
134  * to system memory is blocked during the adjustment time. Because of the
135  * blocking time, having this enabled can cause full system hangs and/or pipe
136  * underruns if we don't meet all of the following requirements:
137  *
138  *  - <= 1 pipe enabled
139  *  - All planes can enable watermarks for latencies >= SAGV engine block time
140  *  - We're not using an interlaced display configuration
141  */
142 static void skl_sagv_enable(struct drm_i915_private *i915)
143 {
144 	int ret;
145 
146 	if (!intel_has_sagv(i915))
147 		return;
148 
149 	if (i915->display.sagv.status == I915_SAGV_ENABLED)
150 		return;
151 
152 	drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
153 	ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
154 			      GEN9_SAGV_ENABLE);
155 
156 	/* We don't need to wait for SAGV when enabling */
157 
158 	/*
159 	 * Some skl systems, pre-release machines in particular,
160 	 * don't actually have SAGV.
161 	 */
162 	if (IS_SKYLAKE(i915) && ret == -ENXIO) {
163 		drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
164 		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
165 		return;
166 	} else if (ret < 0) {
167 		drm_err(&i915->drm, "Failed to enable SAGV\n");
168 		return;
169 	}
170 
171 	i915->display.sagv.status = I915_SAGV_ENABLED;
172 }
173 
174 static void skl_sagv_disable(struct drm_i915_private *i915)
175 {
176 	int ret;
177 
178 	if (!intel_has_sagv(i915))
179 		return;
180 
181 	if (i915->display.sagv.status == I915_SAGV_DISABLED)
182 		return;
183 
184 	drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
185 	/* bspec says to keep retrying for at least 1 ms */
186 	ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
187 				GEN9_SAGV_DISABLE,
188 				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
189 				1);
190 	/*
191 	 * Some skl systems, pre-release machines in particular,
192 	 * don't actually have SAGV.
193 	 */
194 	if (IS_SKYLAKE(i915) && ret == -ENXIO) {
195 		drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
196 		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
197 		return;
198 	} else if (ret < 0) {
199 		drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
200 		return;
201 	}
202 
203 	i915->display.sagv.status = I915_SAGV_DISABLED;
204 }
205 
206 static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
207 {
208 	struct drm_i915_private *i915 = to_i915(state->base.dev);
209 	const struct intel_bw_state *new_bw_state =
210 		intel_atomic_get_new_bw_state(state);
211 
212 	if (!new_bw_state)
213 		return;
214 
215 	if (!intel_can_enable_sagv(i915, new_bw_state))
216 		skl_sagv_disable(i915);
217 }
218 
219 static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
220 {
221 	struct drm_i915_private *i915 = to_i915(state->base.dev);
222 	const struct intel_bw_state *new_bw_state =
223 		intel_atomic_get_new_bw_state(state);
224 
225 	if (!new_bw_state)
226 		return;
227 
228 	if (intel_can_enable_sagv(i915, new_bw_state))
229 		skl_sagv_enable(i915);
230 }
231 
232 static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
233 {
234 	struct drm_i915_private *i915 = to_i915(state->base.dev);
235 	const struct intel_bw_state *old_bw_state =
236 		intel_atomic_get_old_bw_state(state);
237 	const struct intel_bw_state *new_bw_state =
238 		intel_atomic_get_new_bw_state(state);
239 	u16 old_mask, new_mask;
240 
241 	if (!new_bw_state)
242 		return;
243 
244 	old_mask = old_bw_state->qgv_points_mask;
245 	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
246 
247 	if (old_mask == new_mask)
248 		return;
249 
250 	WARN_ON(!new_bw_state->base.changed);
251 
252 	drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
253 		    old_mask, new_mask);
254 
255 	/*
256 	 * Restrict required qgv points before updating the configuration.
257 	 * According to BSpec we can't mask and unmask qgv points at the same
258 	 * time. Also masking should be done before updating the configuration
259 	 * and unmasking afterwards.
260 	 */
261 	icl_pcode_restrict_qgv_points(i915, new_mask);
262 }
263 
264 static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
265 {
266 	struct drm_i915_private *i915 = to_i915(state->base.dev);
267 	const struct intel_bw_state *old_bw_state =
268 		intel_atomic_get_old_bw_state(state);
269 	const struct intel_bw_state *new_bw_state =
270 		intel_atomic_get_new_bw_state(state);
271 	u16 old_mask, new_mask;
272 
273 	if (!new_bw_state)
274 		return;
275 
276 	old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
277 	new_mask = new_bw_state->qgv_points_mask;
278 
279 	if (old_mask == new_mask)
280 		return;
281 
282 	WARN_ON(!new_bw_state->base.changed);
283 
284 	drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
285 		    old_mask, new_mask);
286 
287 	/*
288 	 * Allow required qgv points after updating the configuration.
289 	 * According to BSpec we can't mask and unmask qgv points at the same
290 	 * time. Also masking should be done before updating the configuration
291 	 * and unmasking afterwards.
292 	 */
293 	icl_pcode_restrict_qgv_points(i915, new_mask);
294 }
295 
296 void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
297 {
298 	struct drm_i915_private *i915 = to_i915(state->base.dev);
299 
300 	/*
301 	 * Just return if we can't control SAGV or don't have it.
302 	 * This is different from the situation where we have SAGV but can't
303 	 * afford it due to DBuf limitations - if SAGV is completely
304 	 * disabled in the BIOS, we are not even allowed to send a PCode request,
305 	 * as it would return an error. So we have to check for that here.
306 	 */
307 	if (!intel_has_sagv(i915))
308 		return;
309 
310 	if (DISPLAY_VER(i915) >= 11)
311 		icl_sagv_pre_plane_update(state);
312 	else
313 		skl_sagv_pre_plane_update(state);
314 }
315 
316 void intel_sagv_post_plane_update(struct intel_atomic_state *state)
317 {
318 	struct drm_i915_private *i915 = to_i915(state->base.dev);
319 
320 	/*
321 	 * Just return if we can't control SAGV or don't have it.
322 	 * This is different from the situation where we have SAGV but can't
323 	 * afford it due to DBuf limitations - if SAGV is completely
324 	 * disabled in the BIOS, we are not even allowed to send a PCode request,
325 	 * as it would return an error. So we have to check for that here.
326 	 */
327 	if (!intel_has_sagv(i915))
328 		return;
329 
330 	if (DISPLAY_VER(i915) >= 11)
331 		icl_sagv_post_plane_update(state);
332 	else
333 		skl_sagv_post_plane_update(state);
334 }
335 
336 static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
337 {
338 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
339 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
340 	enum plane_id plane_id;
341 	int max_level = INT_MAX;
342 
343 	if (!intel_has_sagv(i915))
344 		return false;
345 
346 	if (!crtc_state->hw.active)
347 		return true;
348 
349 	if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
350 		return false;
351 
352 	for_each_plane_id_on_crtc(crtc, plane_id) {
353 		const struct skl_plane_wm *wm =
354 			&crtc_state->wm.skl.optimal.planes[plane_id];
355 		int level;
356 
357 		/* Skip this plane if it's not enabled */
358 		if (!wm->wm[0].enable)
359 			continue;
360 
361 		/* Find the highest enabled wm level for this plane */
362 		for (level = i915->display.wm.num_levels - 1;
363 		     !wm->wm[level].enable; --level)
364 		     { }
365 
366 		/* Highest common enabled wm level for all planes */
367 		max_level = min(level, max_level);
368 	}
369 
370 	/* No enabled planes? */
371 	if (max_level == INT_MAX)
372 		return true;
373 
374 	for_each_plane_id_on_crtc(crtc, plane_id) {
375 		const struct skl_plane_wm *wm =
376 			&crtc_state->wm.skl.optimal.planes[plane_id];
377 
378 		/*
379 		 * All enabled planes must have enabled a common wm level that
380 		 * can tolerate memory latencies higher than sagv_block_time_us
381 		 */
382 		if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
383 			return false;
384 	}
385 
386 	return true;
387 }
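/*
 * Illustrative example of the level selection above: if one plane has wm
 * levels 0-5 enabled and another only levels 0-3, the highest common
 * enabled level is 3, and SAGV is allowed only if wm[3].can_sagv is set
 * for every enabled plane on the crtc.
 */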
388 
389 static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
390 {
391 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
392 	enum plane_id plane_id;
393 
394 	if (!crtc_state->hw.active)
395 		return true;
396 
397 	for_each_plane_id_on_crtc(crtc, plane_id) {
398 		const struct skl_plane_wm *wm =
399 			&crtc_state->wm.skl.optimal.planes[plane_id];
400 
401 		if (wm->wm[0].enable && !wm->sagv.wm0.enable)
402 			return false;
403 	}
404 
405 	return true;
406 }
407 
408 static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
409 {
410 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
411 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
412 
413 	if (DISPLAY_VER(i915) >= 12)
414 		return tgl_crtc_can_enable_sagv(crtc_state);
415 	else
416 		return skl_crtc_can_enable_sagv(crtc_state);
417 }
418 
419 bool intel_can_enable_sagv(struct drm_i915_private *i915,
420 			   const struct intel_bw_state *bw_state)
421 {
422 	if (DISPLAY_VER(i915) < 11 &&
423 	    bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
424 		return false;
425 
426 	return bw_state->pipe_sagv_reject == 0;
427 }
428 
429 static int intel_compute_sagv_mask(struct intel_atomic_state *state)
430 {
431 	struct drm_i915_private *i915 = to_i915(state->base.dev);
432 	int ret;
433 	struct intel_crtc *crtc;
434 	struct intel_crtc_state *new_crtc_state;
435 	struct intel_bw_state *new_bw_state = NULL;
436 	const struct intel_bw_state *old_bw_state = NULL;
437 	int i;
438 
439 	for_each_new_intel_crtc_in_state(state, crtc,
440 					 new_crtc_state, i) {
441 		new_bw_state = intel_atomic_get_bw_state(state);
442 		if (IS_ERR(new_bw_state))
443 			return PTR_ERR(new_bw_state);
444 
445 		old_bw_state = intel_atomic_get_old_bw_state(state);
446 
447 		if (intel_crtc_can_enable_sagv(new_crtc_state))
448 			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
449 		else
450 			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
451 	}
452 
453 	if (!new_bw_state)
454 		return 0;
455 
456 	new_bw_state->active_pipes =
457 		intel_calc_active_pipes(state, old_bw_state->active_pipes);
458 
459 	if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
460 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
461 		if (ret)
462 			return ret;
463 	}
464 
465 	if (intel_can_enable_sagv(i915, new_bw_state) !=
466 	    intel_can_enable_sagv(i915, old_bw_state)) {
467 		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
468 		if (ret)
469 			return ret;
470 	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
471 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
472 		if (ret)
473 			return ret;
474 	}
475 
476 	for_each_new_intel_crtc_in_state(state, crtc,
477 					 new_crtc_state, i) {
478 		struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
479 
480 		/*
481 		 * We store use_sagv_wm in the crtc state rather than relying on
482 		 * the bw state, since we have no convenient way to get at the
483 		 * latter from the plane commit hooks (especially in the legacy
484 		 * cursor case).
485 		 */
486 		pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
487 			DISPLAY_VER(i915) >= 12 &&
488 			intel_can_enable_sagv(i915, new_bw_state);
489 	}
490 
491 	return 0;
492 }
493 
494 static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
495 			      u16 start, u16 end)
496 {
497 	entry->start = start;
498 	entry->end = end;
499 
500 	return end;
501 }
502 
503 static int intel_dbuf_slice_size(struct drm_i915_private *i915)
504 {
505 	return INTEL_INFO(i915)->display.dbuf.size /
506 		hweight8(INTEL_INFO(i915)->display.dbuf.slice_mask);
507 }
508 
509 static void
510 skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
511 			 struct skl_ddb_entry *ddb)
512 {
513 	int slice_size = intel_dbuf_slice_size(i915);
514 
515 	if (!slice_mask) {
516 		ddb->start = 0;
517 		ddb->end = 0;
518 		return;
519 	}
520 
521 	ddb->start = (ffs(slice_mask) - 1) * slice_size;
522 	ddb->end = fls(slice_mask) * slice_size;
523 
524 	WARN_ON(ddb->start >= ddb->end);
525 	WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size);
526 }
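/*
 * Worked example (illustrative sizes, not from BSpec): with two slices of
 * 1024 blocks each, intel_dbuf_slice_size() returns 1024, so
 * slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) yields ddb->start = 0 and
 * ddb->end = 2048, while slice_mask = BIT(DBUF_S2) alone yields
 * ddb->start = 1024 and ddb->end = 2048.
 */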
527 
528 static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
529 {
530 	struct skl_ddb_entry ddb;
531 
532 	if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
533 		slice_mask = BIT(DBUF_S1);
534 	else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
535 		slice_mask = BIT(DBUF_S3);
536 
537 	skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
538 
539 	return ddb.start;
540 }
541 
542 u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
543 			    const struct skl_ddb_entry *entry)
544 {
545 	int slice_size = intel_dbuf_slice_size(i915);
546 	enum dbuf_slice start_slice, end_slice;
547 	u8 slice_mask = 0;
548 
549 	if (!skl_ddb_entry_size(entry))
550 		return 0;
551 
552 	start_slice = entry->start / slice_size;
553 	end_slice = (entry->end - 1) / slice_size;
554 
555 	/*
556 	 * A per-plane DDB entry can, in the worst case, span multiple slices,
557 	 * but a single entry is always contiguous.
558 	 */
559 	while (start_slice <= end_slice) {
560 		slice_mask |= BIT(start_slice);
561 		start_slice++;
562 	}
563 
564 	return slice_mask;
565 }
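/*
 * Illustrative example (same assumed 1024-block slice size as above): a DDB
 * entry spanning [512, 1536) has start_slice = 512 / 1024 = 0 and
 * end_slice = 1535 / 1024 = 1, so the returned mask covers both DBUF_S1
 * and DBUF_S2 even though the entry only partially overlaps each slice.
 */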
566 
567 static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
568 {
569 	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
570 	int hdisplay, vdisplay;
571 
572 	if (!crtc_state->hw.active)
573 		return 0;
574 
575 	/*
576 	 * The watermark/DDB requirement depends heavily on the width of the
577 	 * framebuffer, so instead of allocating DDB equally among pipes,
578 	 * distribute it based on the resolution/width of the display.
579 	 */
580 	drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
581 
582 	return hdisplay;
583 }
584 
585 static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
586 				    enum pipe for_pipe,
587 				    unsigned int *weight_start,
588 				    unsigned int *weight_end,
589 				    unsigned int *weight_total)
590 {
591 	struct drm_i915_private *i915 =
592 		to_i915(dbuf_state->base.state->base.dev);
593 	enum pipe pipe;
594 
595 	*weight_start = 0;
596 	*weight_end = 0;
597 	*weight_total = 0;
598 
599 	for_each_pipe(i915, pipe) {
600 		int weight = dbuf_state->weight[pipe];
601 
602 		/*
603 		 * Do not account for pipes using other slice sets.
604 		 * Luckily, as of the current BSpec, slice sets do not partially
605 		 * intersect (pipes share either the same single slice or the same
606 		 * slice set, i.e. no partial intersection), so it is enough to
607 		 * check for equality for now.
608 		 */
609 		if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
610 			continue;
611 
612 		*weight_total += weight;
613 		if (pipe < for_pipe) {
614 			*weight_start += weight;
615 			*weight_end += weight;
616 		} else if (pipe == for_pipe) {
617 			*weight_end += weight;
618 		}
619 	}
620 }
621 
622 static int
623 skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
624 {
625 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
626 	unsigned int weight_total, weight_start, weight_end;
627 	const struct intel_dbuf_state *old_dbuf_state =
628 		intel_atomic_get_old_dbuf_state(state);
629 	struct intel_dbuf_state *new_dbuf_state =
630 		intel_atomic_get_new_dbuf_state(state);
631 	struct intel_crtc_state *crtc_state;
632 	struct skl_ddb_entry ddb_slices;
633 	enum pipe pipe = crtc->pipe;
634 	unsigned int mbus_offset = 0;
635 	u32 ddb_range_size;
636 	u32 dbuf_slice_mask;
637 	u32 start, end;
638 	int ret;
639 
640 	if (new_dbuf_state->weight[pipe] == 0) {
641 		skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
642 		goto out;
643 	}
644 
645 	dbuf_slice_mask = new_dbuf_state->slices[pipe];
646 
647 	skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
648 	mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
649 	ddb_range_size = skl_ddb_entry_size(&ddb_slices);
650 
651 	intel_crtc_dbuf_weights(new_dbuf_state, pipe,
652 				&weight_start, &weight_end, &weight_total);
653 
654 	start = ddb_range_size * weight_start / weight_total;
655 	end = ddb_range_size * weight_end / weight_total;
656 
657 	skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
658 			   ddb_slices.start - mbus_offset + start,
659 			   ddb_slices.start - mbus_offset + end);
660 
661 out:
662 	if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
663 	    skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
664 				&new_dbuf_state->ddb[pipe]))
665 		return 0;
666 
667 	ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
668 	if (ret)
669 		return ret;
670 
671 	crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
672 	if (IS_ERR(crtc_state))
673 		return PTR_ERR(crtc_state);
674 
675 	/*
676 	 * Used for checking overlaps, so we need absolute
677 	 * offsets instead of MBUS relative offsets.
678 	 */
679 	crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
680 	crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
681 
682 	drm_dbg_kms(&i915->drm,
683 		    "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
684 		    crtc->base.base.id, crtc->base.name,
685 		    old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
686 		    old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
687 		    new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
688 		    old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
689 
690 	return 0;
691 }
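/*
 * Illustrative example of the proportional split above (made-up numbers):
 * two pipes sharing the same slice set with weights (hdisplay) 1920 and
 * 1280 give weight_total = 3200. With ddb_range_size = 1024 the first pipe
 * gets [0, 1024 * 1920 / 3200) = [0, 614) and the second pipe gets
 * [614, 1024) within that slice set, before the mbus_offset adjustment.
 */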
692 
693 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
694 				 int width, const struct drm_format_info *format,
695 				 u64 modifier, unsigned int rotation,
696 				 u32 plane_pixel_rate, struct skl_wm_params *wp,
697 				 int color_plane);
698 
699 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
700 				 struct intel_plane *plane,
701 				 int level,
702 				 unsigned int latency,
703 				 const struct skl_wm_params *wp,
704 				 const struct skl_wm_level *result_prev,
705 				 struct skl_wm_level *result /* out */);
706 
707 static unsigned int
708 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
709 		      int num_active)
710 {
711 	struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
712 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
713 	struct skl_wm_level wm = {};
714 	int ret, min_ddb_alloc = 0;
715 	struct skl_wm_params wp;
716 	int level;
717 
718 	ret = skl_compute_wm_params(crtc_state, 256,
719 				    drm_format_info(DRM_FORMAT_ARGB8888),
720 				    DRM_FORMAT_MOD_LINEAR,
721 				    DRM_MODE_ROTATE_0,
722 				    crtc_state->pixel_rate, &wp, 0);
723 	drm_WARN_ON(&i915->drm, ret);
724 
725 	for (level = 0; level < i915->display.wm.num_levels; level++) {
726 		unsigned int latency = i915->display.wm.skl_latency[level];
727 
728 		skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
729 		if (wm.min_ddb_alloc == U16_MAX)
730 			break;
731 
732 		min_ddb_alloc = wm.min_ddb_alloc;
733 	}
734 
735 	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
736 }
737 
738 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
739 {
740 	skl_ddb_entry_init(entry,
741 			   REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
742 			   REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
743 	if (entry->end)
744 		entry->end++;
745 }
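/*
 * The hardware encodes an inclusive end block while skl_ddb_entry uses an
 * exclusive end, hence the entry->end++ above. For example (illustrative),
 * a register with start = 0 and end = 159 decodes to the 160-block range
 * [0, 160).
 */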
746 
747 static void
748 skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
749 			   const enum pipe pipe,
750 			   const enum plane_id plane_id,
751 			   struct skl_ddb_entry *ddb,
752 			   struct skl_ddb_entry *ddb_y)
753 {
754 	u32 val;
755 
756 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
757 	if (plane_id == PLANE_CURSOR) {
758 		val = intel_de_read(i915, CUR_BUF_CFG(pipe));
759 		skl_ddb_entry_init_from_hw(ddb, val);
760 		return;
761 	}
762 
763 	val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
764 	skl_ddb_entry_init_from_hw(ddb, val);
765 
766 	if (DISPLAY_VER(i915) >= 11)
767 		return;
768 
769 	val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
770 	skl_ddb_entry_init_from_hw(ddb_y, val);
771 }
772 
773 static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
774 				      struct skl_ddb_entry *ddb,
775 				      struct skl_ddb_entry *ddb_y)
776 {
777 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
778 	enum intel_display_power_domain power_domain;
779 	enum pipe pipe = crtc->pipe;
780 	intel_wakeref_t wakeref;
781 	enum plane_id plane_id;
782 
783 	power_domain = POWER_DOMAIN_PIPE(pipe);
784 	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
785 	if (!wakeref)
786 		return;
787 
788 	for_each_plane_id_on_crtc(crtc, plane_id)
789 		skl_ddb_get_hw_plane_state(i915, pipe,
790 					   plane_id,
791 					   &ddb[plane_id],
792 					   &ddb_y[plane_id]);
793 
794 	intel_display_power_put(i915, power_domain, wakeref);
795 }
796 
797 struct dbuf_slice_conf_entry {
798 	u8 active_pipes;
799 	u8 dbuf_mask[I915_MAX_PIPES];
800 	bool join_mbus;
801 };
802 
803 /*
804  * Table taken from BSpec 12716.
805  * Pipes do have some preferred DBuf slice affinity,
806  * plus there are some hardcoded requirements on how
807  * those should be distributed for multi-pipe scenarios.
808  * With more DBuf slices the algorithm would only get messier
809  * and less readable, so it was decided to use a table almost
810  * as-is from BSpec itself - that way it is at least easier
811  * to compare, change and check.
812  */
813 static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
814 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
815 {
816 	{
817 		.active_pipes = BIT(PIPE_A),
818 		.dbuf_mask = {
819 			[PIPE_A] = BIT(DBUF_S1),
820 		},
821 	},
822 	{
823 		.active_pipes = BIT(PIPE_B),
824 		.dbuf_mask = {
825 			[PIPE_B] = BIT(DBUF_S1),
826 		},
827 	},
828 	{
829 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
830 		.dbuf_mask = {
831 			[PIPE_A] = BIT(DBUF_S1),
832 			[PIPE_B] = BIT(DBUF_S2),
833 		},
834 	},
835 	{
836 		.active_pipes = BIT(PIPE_C),
837 		.dbuf_mask = {
838 			[PIPE_C] = BIT(DBUF_S2),
839 		},
840 	},
841 	{
842 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
843 		.dbuf_mask = {
844 			[PIPE_A] = BIT(DBUF_S1),
845 			[PIPE_C] = BIT(DBUF_S2),
846 		},
847 	},
848 	{
849 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
850 		.dbuf_mask = {
851 			[PIPE_B] = BIT(DBUF_S1),
852 			[PIPE_C] = BIT(DBUF_S2),
853 		},
854 	},
855 	{
856 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
857 		.dbuf_mask = {
858 			[PIPE_A] = BIT(DBUF_S1),
859 			[PIPE_B] = BIT(DBUF_S1),
860 			[PIPE_C] = BIT(DBUF_S2),
861 		},
862 	},
863 	{}
864 };
865 
866 /*
867  * Table taken from BSpec 49255.
868  * Pipes do have some preferred DBuf slice affinity,
869  * plus there are some hardcoded requirements on how
870  * those should be distributed for multi-pipe scenarios.
871  * With more DBuf slices the algorithm would only get messier
872  * and less readable, so it was decided to use a table almost
873  * as-is from BSpec itself - that way it is at least easier
874  * to compare, change and check.
875  */
876 static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
877 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
878 {
879 	{
880 		.active_pipes = BIT(PIPE_A),
881 		.dbuf_mask = {
882 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
883 		},
884 	},
885 	{
886 		.active_pipes = BIT(PIPE_B),
887 		.dbuf_mask = {
888 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
889 		},
890 	},
891 	{
892 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
893 		.dbuf_mask = {
894 			[PIPE_A] = BIT(DBUF_S2),
895 			[PIPE_B] = BIT(DBUF_S1),
896 		},
897 	},
898 	{
899 		.active_pipes = BIT(PIPE_C),
900 		.dbuf_mask = {
901 			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
902 		},
903 	},
904 	{
905 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
906 		.dbuf_mask = {
907 			[PIPE_A] = BIT(DBUF_S1),
908 			[PIPE_C] = BIT(DBUF_S2),
909 		},
910 	},
911 	{
912 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
913 		.dbuf_mask = {
914 			[PIPE_B] = BIT(DBUF_S1),
915 			[PIPE_C] = BIT(DBUF_S2),
916 		},
917 	},
918 	{
919 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
920 		.dbuf_mask = {
921 			[PIPE_A] = BIT(DBUF_S1),
922 			[PIPE_B] = BIT(DBUF_S1),
923 			[PIPE_C] = BIT(DBUF_S2),
924 		},
925 	},
926 	{
927 		.active_pipes = BIT(PIPE_D),
928 		.dbuf_mask = {
929 			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
930 		},
931 	},
932 	{
933 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
934 		.dbuf_mask = {
935 			[PIPE_A] = BIT(DBUF_S1),
936 			[PIPE_D] = BIT(DBUF_S2),
937 		},
938 	},
939 	{
940 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
941 		.dbuf_mask = {
942 			[PIPE_B] = BIT(DBUF_S1),
943 			[PIPE_D] = BIT(DBUF_S2),
944 		},
945 	},
946 	{
947 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
948 		.dbuf_mask = {
949 			[PIPE_A] = BIT(DBUF_S1),
950 			[PIPE_B] = BIT(DBUF_S1),
951 			[PIPE_D] = BIT(DBUF_S2),
952 		},
953 	},
954 	{
955 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
956 		.dbuf_mask = {
957 			[PIPE_C] = BIT(DBUF_S1),
958 			[PIPE_D] = BIT(DBUF_S2),
959 		},
960 	},
961 	{
962 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
963 		.dbuf_mask = {
964 			[PIPE_A] = BIT(DBUF_S1),
965 			[PIPE_C] = BIT(DBUF_S2),
966 			[PIPE_D] = BIT(DBUF_S2),
967 		},
968 	},
969 	{
970 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
971 		.dbuf_mask = {
972 			[PIPE_B] = BIT(DBUF_S1),
973 			[PIPE_C] = BIT(DBUF_S2),
974 			[PIPE_D] = BIT(DBUF_S2),
975 		},
976 	},
977 	{
978 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
979 		.dbuf_mask = {
980 			[PIPE_A] = BIT(DBUF_S1),
981 			[PIPE_B] = BIT(DBUF_S1),
982 			[PIPE_C] = BIT(DBUF_S2),
983 			[PIPE_D] = BIT(DBUF_S2),
984 		},
985 	},
986 	{}
987 };
988 
989 static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
990 	{
991 		.active_pipes = BIT(PIPE_A),
992 		.dbuf_mask = {
993 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
994 		},
995 	},
996 	{
997 		.active_pipes = BIT(PIPE_B),
998 		.dbuf_mask = {
999 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1000 		},
1001 	},
1002 	{
1003 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
1004 		.dbuf_mask = {
1005 			[PIPE_A] = BIT(DBUF_S1),
1006 			[PIPE_B] = BIT(DBUF_S2),
1007 		},
1008 	},
1009 	{
1010 		.active_pipes = BIT(PIPE_C),
1011 		.dbuf_mask = {
1012 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1013 		},
1014 	},
1015 	{
1016 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
1017 		.dbuf_mask = {
1018 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1019 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1020 		},
1021 	},
1022 	{
1023 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
1024 		.dbuf_mask = {
1025 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1026 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1027 		},
1028 	},
1029 	{
1030 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
1031 		.dbuf_mask = {
1032 			[PIPE_A] = BIT(DBUF_S1),
1033 			[PIPE_B] = BIT(DBUF_S2),
1034 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1035 		},
1036 	},
1037 	{
1038 		.active_pipes = BIT(PIPE_D),
1039 		.dbuf_mask = {
1040 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1041 		},
1042 	},
1043 	{
1044 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
1045 		.dbuf_mask = {
1046 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1047 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1048 		},
1049 	},
1050 	{
1051 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
1052 		.dbuf_mask = {
1053 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1054 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1055 		},
1056 	},
1057 	{
1058 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
1059 		.dbuf_mask = {
1060 			[PIPE_A] = BIT(DBUF_S1),
1061 			[PIPE_B] = BIT(DBUF_S2),
1062 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1063 		},
1064 	},
1065 	{
1066 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
1067 		.dbuf_mask = {
1068 			[PIPE_C] = BIT(DBUF_S3),
1069 			[PIPE_D] = BIT(DBUF_S4),
1070 		},
1071 	},
1072 	{
1073 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1074 		.dbuf_mask = {
1075 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1076 			[PIPE_C] = BIT(DBUF_S3),
1077 			[PIPE_D] = BIT(DBUF_S4),
1078 		},
1079 	},
1080 	{
1081 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1082 		.dbuf_mask = {
1083 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1084 			[PIPE_C] = BIT(DBUF_S3),
1085 			[PIPE_D] = BIT(DBUF_S4),
1086 		},
1087 	},
1088 	{
1089 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1090 		.dbuf_mask = {
1091 			[PIPE_A] = BIT(DBUF_S1),
1092 			[PIPE_B] = BIT(DBUF_S2),
1093 			[PIPE_C] = BIT(DBUF_S3),
1094 			[PIPE_D] = BIT(DBUF_S4),
1095 		},
1096 	},
1097 	{}
1098 };
1099 
1100 static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
1101 	/*
1102 	 * Keep the join_mbus cases first so check_mbus_joined()
1103 	 * will prefer them over the !join_mbus cases.
1104 	 */
1105 	{
1106 		.active_pipes = BIT(PIPE_A),
1107 		.dbuf_mask = {
1108 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
1109 		},
1110 		.join_mbus = true,
1111 	},
1112 	{
1113 		.active_pipes = BIT(PIPE_B),
1114 		.dbuf_mask = {
1115 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
1116 		},
1117 		.join_mbus = true,
1118 	},
1119 	{
1120 		.active_pipes = BIT(PIPE_A),
1121 		.dbuf_mask = {
1122 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1123 		},
1124 		.join_mbus = false,
1125 	},
1126 	{
1127 		.active_pipes = BIT(PIPE_B),
1128 		.dbuf_mask = {
1129 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1130 		},
1131 		.join_mbus = false,
1132 	},
1133 	{
1134 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
1135 		.dbuf_mask = {
1136 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1137 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1138 		},
1139 	},
1140 	{
1141 		.active_pipes = BIT(PIPE_C),
1142 		.dbuf_mask = {
1143 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1144 		},
1145 	},
1146 	{
1147 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
1148 		.dbuf_mask = {
1149 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1150 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1151 		},
1152 	},
1153 	{
1154 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
1155 		.dbuf_mask = {
1156 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1157 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1158 		},
1159 	},
1160 	{
1161 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
1162 		.dbuf_mask = {
1163 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1164 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1165 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1166 		},
1167 	},
1168 	{
1169 		.active_pipes = BIT(PIPE_D),
1170 		.dbuf_mask = {
1171 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1172 		},
1173 	},
1174 	{
1175 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
1176 		.dbuf_mask = {
1177 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1178 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1179 		},
1180 	},
1181 	{
1182 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
1183 		.dbuf_mask = {
1184 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1185 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1186 		},
1187 	},
1188 	{
1189 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
1190 		.dbuf_mask = {
1191 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1192 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1193 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1194 		},
1195 	},
1196 	{
1197 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
1198 		.dbuf_mask = {
1199 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1200 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1201 		},
1202 	},
1203 	{
1204 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1205 		.dbuf_mask = {
1206 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1207 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1208 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1209 		},
1210 	},
1211 	{
1212 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1213 		.dbuf_mask = {
1214 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1215 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1216 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1217 		},
1218 	},
1219 	{
1220 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1221 		.dbuf_mask = {
1222 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1223 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1224 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1225 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1226 		},
1227 	},
1228 	{}
1229 
1230 };
1231 
1232 static bool check_mbus_joined(u8 active_pipes,
1233 			      const struct dbuf_slice_conf_entry *dbuf_slices)
1234 {
1235 	int i;
1236 
1237 	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1238 		if (dbuf_slices[i].active_pipes == active_pipes)
1239 			return dbuf_slices[i].join_mbus;
1240 	}
1241 	return false;
1242 }
1243 
1244 static bool adlp_check_mbus_joined(u8 active_pipes)
1245 {
1246 	return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
1247 }
1248 
1249 static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
1250 			      const struct dbuf_slice_conf_entry *dbuf_slices)
1251 {
1252 	int i;
1253 
1254 	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1255 		if (dbuf_slices[i].active_pipes == active_pipes &&
1256 		    dbuf_slices[i].join_mbus == join_mbus)
1257 			return dbuf_slices[i].dbuf_mask[pipe];
1258 	}
1259 	return 0;
1260 }
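/*
 * Example lookup (taken from the tgl_allowed_dbufs table above): with
 * active_pipes = BIT(PIPE_A) | BIT(PIPE_B) and join_mbus == false, pipe A
 * resolves to BIT(DBUF_S2) and pipe B to BIT(DBUF_S1); any combination not
 * listed in the table resolves to 0 (no slices).
 */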
1261 
1262 /*
1263  * This function finds an entry with the same enabled pipe configuration and
1264  * returns the corresponding DBuf slice mask as stated in BSpec for the
1265  * particular platform.
1266  */
1267 static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1268 {
1269 	/*
1270 	 * FIXME: For ICL this is still a bit unclear, as a previous BSpec revision
1271 	 * required calculating a "pipe ratio" in order to determine
1272 	 * whether one or two slices can be used for single-pipe configurations,
1273 	 * as an additional constraint on top of the existing table.
1274 	 * However, based on more recent info, it should not be a "pipe ratio"
1275 	 * but rather the ratio between pixel_rate and cdclk with additional
1276 	 * constants, so for now we are using only the table until this is
1277 	 * clarified. Also this is the reason why the crtc_state param is
1278 	 * still here - we will need it once those additional constraints
1279 	 * pop up.
1280 	 */
1281 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1282 				   icl_allowed_dbufs);
1283 }
1284 
1285 static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1286 {
1287 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1288 				   tgl_allowed_dbufs);
1289 }
1290 
1291 static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1292 {
1293 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1294 				   adlp_allowed_dbufs);
1295 }
1296 
1297 static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1298 {
1299 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1300 				   dg2_allowed_dbufs);
1301 }
1302 
1303 static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
1304 {
1305 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1306 	enum pipe pipe = crtc->pipe;
1307 
1308 	if (IS_DG2(i915))
1309 		return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1310 	else if (DISPLAY_VER(i915) >= 13)
1311 		return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1312 	else if (DISPLAY_VER(i915) == 12)
1313 		return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1314 	else if (DISPLAY_VER(i915) == 11)
1315 		return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1316 	/*
1317 	 * For anything else just return one slice for now.
1318 	 * Should be extended for other platforms.
1319 	 */
1320 	return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
1321 }
1322 
1323 static bool
1324 use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
1325 		     struct intel_plane *plane)
1326 {
1327 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
1328 
1329 	return DISPLAY_VER(i915) >= 13 &&
1330 	       crtc_state->uapi.async_flip &&
1331 	       plane->async_flip;
1332 }
1333 
1334 static u64
1335 skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
1336 {
1337 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1338 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1339 	enum plane_id plane_id;
1340 	u64 data_rate = 0;
1341 
1342 	for_each_plane_id_on_crtc(crtc, plane_id) {
1343 		if (plane_id == PLANE_CURSOR)
1344 			continue;
1345 
1346 		data_rate += crtc_state->rel_data_rate[plane_id];
1347 
1348 		if (DISPLAY_VER(i915) < 11)
1349 			data_rate += crtc_state->rel_data_rate_y[plane_id];
1350 	}
1351 
1352 	return data_rate;
1353 }
1354 
1355 static const struct skl_wm_level *
1356 skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
1357 		   enum plane_id plane_id,
1358 		   int level)
1359 {
1360 	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1361 
1362 	if (level == 0 && pipe_wm->use_sagv_wm)
1363 		return &wm->sagv.wm0;
1364 
1365 	return &wm->wm[level];
1366 }
1367 
1368 static const struct skl_wm_level *
1369 skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
1370 		   enum plane_id plane_id)
1371 {
1372 	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1373 
1374 	if (pipe_wm->use_sagv_wm)
1375 		return &wm->sagv.trans_wm;
1376 
1377 	return &wm->trans_wm;
1378 }
1379 
1380 /*
1381  * We only disable the watermarks for each plane if
1382  * they exceed the ddb allocation of said plane. This
1383  * is done so that we don't end up touching cursor
1384  * watermarks needlessly when some other plane reduces
1385  * our max possible watermark level.
1386  *
1387  * Bspec has this to say about the PLANE_WM enable bit:
1388  * "All the watermarks at this level for all enabled
1389  *  planes must be enabled before the level will be used."
1390  * So this is actually safe to do.
1391  */
1392 static void
1393 skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
1394 {
1395 	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
1396 		memset(wm, 0, sizeof(*wm));
1397 }
1398 
1399 static void
1400 skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
1401 			const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
1402 {
1403 	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
1404 	    uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1405 		memset(wm, 0, sizeof(*wm));
1406 		memset(uv_wm, 0, sizeof(*uv_wm));
1407 	}
1408 }
1409 
1410 static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
1411 				const struct skl_plane_wm *wm)
1412 {
1413 	/*
1414 	 * Wa_1408961008:icl, ehl
1415 	 * Wa_14012656716:tgl, adl
1416 	 * Wa_14017887344:icl
1417 	 * Wa_14017868169:adl, tgl
1418 	 * Due to some power saving optimizations, different subsystems
1419 	 * like PSR might still use even the disabled wm level registers
1420 	 * for "reference", so let's keep the values sane at least.
1421 	 * Considering the number of workarounds requiring us to do similar
1422 	 * things, it was decided to simply do this for all platforms; as
1423 	 * those wm levels are disabled anyway, this isn't going to do any harm.
1424 	 */
1425 	return level > 0 && !wm->wm[level].enable;
1426 }
1427 
1428 struct skl_plane_ddb_iter {
1429 	u64 data_rate;
1430 	u16 start, size;
1431 };
1432 
1433 static void
1434 skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
1435 		       struct skl_ddb_entry *ddb,
1436 		       const struct skl_wm_level *wm,
1437 		       u64 data_rate)
1438 {
1439 	u16 size, extra = 0;
1440 
1441 	if (data_rate) {
1442 		extra = min_t(u16, iter->size,
1443 			      DIV64_U64_ROUND_UP(iter->size * data_rate,
1444 						 iter->data_rate));
1445 		iter->size -= extra;
1446 		iter->data_rate -= data_rate;
1447 	}
1448 
1449 	/*
1450 	 * Keep ddb entry of all disabled planes explicitly zeroed
1451 	 * to avoid skl_ddb_add_affected_planes() adding them to
1452 	 * the state when other planes change their allocations.
1453 	 */
1454 	size = wm->min_ddb_alloc + extra;
1455 	if (size)
1456 		iter->start = skl_ddb_entry_init(ddb, iter->start,
1457 						 iter->start + size);
1458 }
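/*
 * Illustrative example of the "extra" distribution above (made-up numbers):
 * with iter->size = 100 leftover blocks, iter->data_rate = 60 and a plane
 * whose data_rate = 30, the plane receives
 * extra = DIV64_U64_ROUND_UP(100 * 30, 60) = 50 blocks on top of its
 * wm->min_ddb_alloc, and iter->size/iter->data_rate shrink accordingly.
 */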
1459 
1460 static int
1461 skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
1462 			    struct intel_crtc *crtc)
1463 {
1464 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1465 	struct intel_crtc_state *crtc_state =
1466 		intel_atomic_get_new_crtc_state(state, crtc);
1467 	const struct intel_dbuf_state *dbuf_state =
1468 		intel_atomic_get_new_dbuf_state(state);
1469 	const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
1470 	int num_active = hweight8(dbuf_state->active_pipes);
1471 	struct skl_plane_ddb_iter iter;
1472 	enum plane_id plane_id;
1473 	u16 cursor_size;
1474 	u32 blocks;
1475 	int level;
1476 
1477 	/* Clear the partitioning for disabled planes. */
1478 	memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
1479 	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
1480 
1481 	if (!crtc_state->hw.active)
1482 		return 0;
1483 
1484 	iter.start = alloc->start;
1485 	iter.size = skl_ddb_entry_size(alloc);
1486 	if (iter.size == 0)
1487 		return 0;
1488 
1489 	/* Allocate fixed number of blocks for cursor. */
1490 	cursor_size = skl_cursor_allocation(crtc_state, num_active);
1491 	iter.size -= cursor_size;
1492 	skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
1493 			   alloc->end - cursor_size, alloc->end);
1494 
1495 	iter.data_rate = skl_total_relative_data_rate(crtc_state);
1496 
1497 	/*
1498 	 * Find the highest watermark level for which we can satisfy the block
1499 	 * requirement of active planes.
1500 	 */
1501 	for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
1502 		blocks = 0;
1503 		for_each_plane_id_on_crtc(crtc, plane_id) {
1504 			const struct skl_plane_wm *wm =
1505 				&crtc_state->wm.skl.optimal.planes[plane_id];
1506 
1507 			if (plane_id == PLANE_CURSOR) {
1508 				const struct skl_ddb_entry *ddb =
1509 					&crtc_state->wm.skl.plane_ddb[plane_id];
1510 
1511 				if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1512 					drm_WARN_ON(&i915->drm,
1513 						    wm->wm[level].min_ddb_alloc != U16_MAX);
1514 					blocks = U32_MAX;
1515 					break;
1516 				}
1517 				continue;
1518 			}
1519 
1520 			blocks += wm->wm[level].min_ddb_alloc;
1521 			blocks += wm->uv_wm[level].min_ddb_alloc;
1522 		}
1523 
1524 		if (blocks <= iter.size) {
1525 			iter.size -= blocks;
1526 			break;
1527 		}
1528 	}
1529 
1530 	if (level < 0) {
1531 		drm_dbg_kms(&i915->drm,
1532 			    "Requested display configuration exceeds system DDB limitations\n");
1533 		drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
1534 			    blocks, iter.size);
1535 		return -EINVAL;
1536 	}
1537 
1538 	/* avoid the WARN later when we don't allocate any extra DDB */
1539 	if (iter.data_rate == 0)
1540 		iter.size = 0;
1541 
1542 	/*
1543 	 * Grant each plane the blocks it requires at the highest achievable
1544 	 * watermark level, plus an extra share of the leftover blocks
1545 	 * proportional to its relative data rate.
1546 	 */
1547 	for_each_plane_id_on_crtc(crtc, plane_id) {
1548 		struct skl_ddb_entry *ddb =
1549 			&crtc_state->wm.skl.plane_ddb[plane_id];
1550 		struct skl_ddb_entry *ddb_y =
1551 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
1552 		const struct skl_plane_wm *wm =
1553 			&crtc_state->wm.skl.optimal.planes[plane_id];
1554 
1555 		if (plane_id == PLANE_CURSOR)
1556 			continue;
1557 
1558 		if (DISPLAY_VER(i915) < 11 &&
1559 		    crtc_state->nv12_planes & BIT(plane_id)) {
1560 			skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
1561 					       crtc_state->rel_data_rate_y[plane_id]);
1562 			skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
1563 					       crtc_state->rel_data_rate[plane_id]);
1564 		} else {
1565 			skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
1566 					       crtc_state->rel_data_rate[plane_id]);
1567 		}
1568 	}
1569 	drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
1570 
1571 	/*
1572 	 * When we calculated watermark values we didn't know how high
1573 	 * of a level we'd actually be able to hit, so we just marked
1574 	 * all levels as "enabled."  Go back now and disable the ones
1575 	 * that aren't actually possible.
1576 	 */
1577 	for (level++; level < i915->display.wm.num_levels; level++) {
1578 		for_each_plane_id_on_crtc(crtc, plane_id) {
1579 			const struct skl_ddb_entry *ddb =
1580 				&crtc_state->wm.skl.plane_ddb[plane_id];
1581 			const struct skl_ddb_entry *ddb_y =
1582 				&crtc_state->wm.skl.plane_ddb_y[plane_id];
1583 			struct skl_plane_wm *wm =
1584 				&crtc_state->wm.skl.optimal.planes[plane_id];
1585 
1586 			if (DISPLAY_VER(i915) < 11 &&
1587 			    crtc_state->nv12_planes & BIT(plane_id))
1588 				skl_check_nv12_wm_level(&wm->wm[level],
1589 							&wm->uv_wm[level],
1590 							ddb_y, ddb);
1591 			else
1592 				skl_check_wm_level(&wm->wm[level], ddb);
1593 
1594 			if (skl_need_wm_copy_wa(i915, level, wm)) {
1595 				wm->wm[level].blocks = wm->wm[level - 1].blocks;
1596 				wm->wm[level].lines = wm->wm[level - 1].lines;
1597 				wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
1598 			}
1599 		}
1600 	}
1601 
1602 	/*
1603 	 * Go back and disable the transition and SAGV watermarks
1604 	 * if it turns out we don't have enough DDB blocks for them.
1605 	 */
1606 	for_each_plane_id_on_crtc(crtc, plane_id) {
1607 		const struct skl_ddb_entry *ddb =
1608 			&crtc_state->wm.skl.plane_ddb[plane_id];
1609 		const struct skl_ddb_entry *ddb_y =
1610 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
1611 		struct skl_plane_wm *wm =
1612 			&crtc_state->wm.skl.optimal.planes[plane_id];
1613 
1614 		if (DISPLAY_VER(i915) < 11 &&
1615 		    crtc_state->nv12_planes & BIT(plane_id)) {
1616 			skl_check_wm_level(&wm->trans_wm, ddb_y);
1617 		} else {
1618 			WARN_ON(skl_ddb_entry_size(ddb_y));
1619 
1620 			skl_check_wm_level(&wm->trans_wm, ddb);
1621 		}
1622 
1623 		skl_check_wm_level(&wm->sagv.wm0, ddb);
1624 		skl_check_wm_level(&wm->sagv.trans_wm, ddb);
1625 	}
1626 
1627 	return 0;
1628 }
1629 
1630 /*
1631  * The max latency should be 257 (max the punit can code is 255 and we add 2us
1632  * for the read latency) and cpp should always be <= 8, so that
1633  * should allow pixel_rate up to ~2 GHz which seems sufficient since max
1634  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
1635  */
1636 static uint_fixed_16_16_t
1637 skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
1638 	       u8 cpp, u32 latency, u32 dbuf_block_size)
1639 {
1640 	u32 wm_intermediate_val;
1641 	uint_fixed_16_16_t ret;
1642 
1643 	if (latency == 0)
1644 		return FP_16_16_MAX;
1645 
1646 	wm_intermediate_val = latency * pixel_rate * cpp;
1647 	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
1648 
1649 	if (DISPLAY_VER(i915) >= 10)
1650 		ret = add_fixed16_u32(ret, 1);
1651 
1652 	return ret;
1653 }
1654 
1655 static uint_fixed_16_16_t
1656 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
1657 	       uint_fixed_16_16_t plane_blocks_per_line)
1658 {
1659 	u32 wm_intermediate_val;
1660 	uint_fixed_16_16_t ret;
1661 
1662 	if (latency == 0)
1663 		return FP_16_16_MAX;
1664 
1665 	wm_intermediate_val = latency * pixel_rate;
1666 	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
1667 					   pipe_htotal * 1000);
1668 	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
1669 	return ret;
1670 }
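/*
 * In plain form (latency in us, pixel rate in kHz, htotal in pixels, as
 * used by the callers):
 *
 *   method1 = latency * pixel_rate * cpp / (1000 * dbuf_block_size)
 *   method2 = ceil(latency * pixel_rate / (htotal * 1000)) * plane_blocks_per_line
 *
 * e.g. (illustrative) latency = 10 us, pixel_rate = 148500 kHz, cpp = 4 and
 * dbuf_block_size = 512 give method1 ~= 11.6 blocks, before the +1
 * adjustment applied on DISPLAY_VER >= 10.
 */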
1671 
1672 static uint_fixed_16_16_t
1673 intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
1674 {
1675 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1676 	u32 pixel_rate;
1677 	u32 crtc_htotal;
1678 	uint_fixed_16_16_t linetime_us;
1679 
1680 	if (!crtc_state->hw.active)
1681 		return u32_to_fixed16(0);
1682 
1683 	pixel_rate = crtc_state->pixel_rate;
1684 
1685 	if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
1686 		return u32_to_fixed16(0);
1687 
1688 	crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
1689 	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
1690 
1691 	return linetime_us;
1692 }
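/*
 * Since pixel_rate is in kHz, the division above yields microseconds:
 * e.g. (illustrative) crtc_htotal = 2200 and pixel_rate = 148500 kHz give
 * a line time of 2200 * 1000 / 148500 ~= 14.8 us.
 */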
1693 
1694 static int
1695 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
1696 		      int width, const struct drm_format_info *format,
1697 		      u64 modifier, unsigned int rotation,
1698 		      u32 plane_pixel_rate, struct skl_wm_params *wp,
1699 		      int color_plane)
1700 {
1701 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1702 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1703 	u32 interm_pbpl;
1704 
1705 	/* only planar formats have two planes */
1706 	if (color_plane == 1 &&
1707 	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
1708 		drm_dbg_kms(&i915->drm,
1709 			    "Non-planar formats have a single plane\n");
1710 		return -EINVAL;
1711 	}
1712 
1713 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
1714 	wp->y_tiled = modifier != I915_FORMAT_MOD_X_TILED &&
1715 		intel_fb_is_tiled_modifier(modifier);
1716 	wp->rc_surface = intel_fb_is_ccs_modifier(modifier);
1717 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
1718 
1719 	wp->width = width;
1720 	if (color_plane == 1 && wp->is_planar)
1721 		wp->width /= 2;
1722 
1723 	wp->cpp = format->cpp[color_plane];
1724 	wp->plane_pixel_rate = plane_pixel_rate;
1725 
1726 	if (DISPLAY_VER(i915) >= 11 &&
1727 	    modifier == I915_FORMAT_MOD_Yf_TILED  && wp->cpp == 1)
1728 		wp->dbuf_block_size = 256;
1729 	else
1730 		wp->dbuf_block_size = 512;
1731 
1732 	if (drm_rotation_90_or_270(rotation)) {
1733 		switch (wp->cpp) {
1734 		case 1:
1735 			wp->y_min_scanlines = 16;
1736 			break;
1737 		case 2:
1738 			wp->y_min_scanlines = 8;
1739 			break;
1740 		case 4:
1741 			wp->y_min_scanlines = 4;
1742 			break;
1743 		default:
1744 			MISSING_CASE(wp->cpp);
1745 			return -EINVAL;
1746 		}
1747 	} else {
1748 		wp->y_min_scanlines = 4;
1749 	}
1750 
1751 	if (skl_needs_memory_bw_wa(i915))
1752 		wp->y_min_scanlines *= 2;
1753 
1754 	wp->plane_bytes_per_line = wp->width * wp->cpp;
1755 	if (wp->y_tiled) {
1756 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
1757 					   wp->y_min_scanlines,
1758 					   wp->dbuf_block_size);
1759 
1760 		if (DISPLAY_VER(i915) >= 10)
1761 			interm_pbpl++;
1762 
1763 		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
1764 							wp->y_min_scanlines);
1765 	} else {
1766 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
1767 					   wp->dbuf_block_size);
1768 
1769 		if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
1770 			interm_pbpl++;
1771 
1772 		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
1773 	}
1774 
1775 	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
1776 					     wp->plane_blocks_per_line);
1777 
1778 	wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
1779 
1780 	return 0;
1781 }
1782 
1783 static int
1784 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
1785 			    const struct intel_plane_state *plane_state,
1786 			    struct skl_wm_params *wp, int color_plane)
1787 {
1788 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1789 	int width;
1790 
1791 	/*
1792 	 * Src coordinates are already rotated by 270 degrees for
1793 	 * the 90/270 degree plane rotation cases (to match the
1794 	 * GTT mapping), hence no need to account for rotation here.
1795 	 */
1796 	width = drm_rect_width(&plane_state->uapi.src) >> 16;
1797 
1798 	return skl_compute_wm_params(crtc_state, width,
1799 				     fb->format, fb->modifier,
1800 				     plane_state->hw.rotation,
1801 				     intel_plane_pixel_rate(crtc_state, plane_state),
1802 				     wp, color_plane);
1803 }
1804 
1805 static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
1806 {
1807 	if (DISPLAY_VER(i915) >= 10)
1808 		return true;
1809 
1810 	/* The number of lines are ignored for the level 0 watermark. */
1811 	/* The number of lines is ignored for the level 0 watermark. */
1812 }
1813 
1814 static int skl_wm_max_lines(struct drm_i915_private *i915)
1815 {
1816 	if (DISPLAY_VER(i915) >= 13)
1817 		return 255;
1818 	else
1819 		return 31;
1820 }
1821 
1822 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
1823 				 struct intel_plane *plane,
1824 				 int level,
1825 				 unsigned int latency,
1826 				 const struct skl_wm_params *wp,
1827 				 const struct skl_wm_level *result_prev,
1828 				 struct skl_wm_level *result /* out */)
1829 {
1830 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1831 	uint_fixed_16_16_t method1, method2;
1832 	uint_fixed_16_16_t selected_result;
1833 	u32 blocks, lines, min_ddb_alloc = 0;
1834 
1835 	if (latency == 0 ||
1836 	    (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
1837 		/* reject it */
1838 		result->min_ddb_alloc = U16_MAX;
1839 		return;
1840 	}
1841 
1842 	/*
1843 	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
1844 	 * Display WA #1141: kbl,cfl
1845 	 */
1846 	if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
1847 	    skl_watermark_ipc_enabled(i915))
1848 		latency += 4;
1849 
1850 	if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
1851 		latency += 15;
1852 
1853 	method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
1854 				 wp->cpp, latency, wp->dbuf_block_size);
1855 	method2 = skl_wm_method2(wp->plane_pixel_rate,
1856 				 crtc_state->hw.pipe_mode.crtc_htotal,
1857 				 latency,
1858 				 wp->plane_blocks_per_line);
1859 
1860 	if (wp->y_tiled) {
1861 		selected_result = max_fixed16(method2, wp->y_tile_minimum);
1862 	} else {
1863 		if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
1864 		     wp->dbuf_block_size < 1) &&
1865 		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
1866 			selected_result = method2;
1867 		} else if (latency >= wp->linetime_us) {
1868 			if (DISPLAY_VER(i915) == 9)
1869 				selected_result = min_fixed16(method1, method2);
1870 			else
1871 				selected_result = method2;
1872 		} else {
1873 			selected_result = method1;
1874 		}
1875 	}
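	/*
	 * Non-normative summary of the selection above: method1 is (roughly)
	 * the number of blocks needed to cover the latency at the plane's
	 * pixel rate, method2 the latency rounded up to whole lines times
	 * plane_blocks_per_line. Y-tiled surfaces never go below
	 * y_tile_minimum, planes narrower than one dbuf block per line use
	 * method2, and otherwise method2 is used when the latency covers at
	 * least one linetime (gen9 takes the smaller of the two), method1
	 * when it does not. See Bspec for the authoritative rules.
	 */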
1876 
1877 	blocks = fixed16_to_u32_round_up(selected_result) + 1;
1878 	/*
1879 	 * Let's have blocks at minimum equivalent to plane_blocks_per_line,
1880 	 * as there will be at least one line for the lines configuration. This
1881 	 * is a workaround for FIFO underruns observed with resolutions like
1882 	 * 4k 60 Hz in single-channel DRAM configurations.
1883 	 *
1884 	 * As per Bspec 49325, if the ddb allocation can hold at least
1885 	 * one plane_blocks_per_line, we should have selected method2 in
1886 	 * the above logic. Assuming that modern versions have enough dbuf
1887 	 * and method2 guarantees blocks equivalent to at least 1 line,
1888 	 * select the blocks as plane_blocks_per_line.
1889 	 *
1890 	 * TODO: Revisit the logic when we have better understanding on DRAM
1891 	 * channels' impact on the level 0 memory latency and the relevant
1892 	 * wm calculations.
1893 	 */
1894 	if (skl_wm_has_lines(i915, level))
1895 		blocks = max(blocks,
1896 			     fixed16_to_u32_round_up(wp->plane_blocks_per_line));
1897 	lines = div_round_up_fixed16(selected_result,
1898 				     wp->plane_blocks_per_line);
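	/*
	 * Continuing the illustrative numbers from skl_compute_wm_params():
	 * with plane_blocks_per_line = 30.25 and, say, selected_result =
	 * 30.25 (one full line), blocks = ceil(30.25) + 1 = 32, the max()
	 * above leaves it at 32, and lines = ceil(30.25 / 30.25) = 1.
	 */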
1899 
1900 	if (DISPLAY_VER(i915) == 9) {
1901 		/* Display WA #1125: skl,bxt,kbl */
1902 		if (level == 0 && wp->rc_surface)
1903 			blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1904 
1905 		/* Display WA #1126: skl,bxt,kbl */
1906 		if (level >= 1 && level <= 7) {
1907 			if (wp->y_tiled) {
1908 				blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1909 				lines += wp->y_min_scanlines;
1910 			} else {
1911 				blocks++;
1912 			}
1913 
1914 			/*
1915 			 * Make sure result blocks for higher latency levels are
1916 			 * at least as high as the level below the current level.
1917 			 * This is an assumption made by the DDB algorithm
1918 			 * optimization for special cases. Also covers Display WA #1125 for RC.
1919 			 */
1920 			if (result_prev->blocks > blocks)
1921 				blocks = result_prev->blocks;
1922 		}
1923 	}
1924 
1925 	if (DISPLAY_VER(i915) >= 11) {
1926 		if (wp->y_tiled) {
1927 			int extra_lines;
1928 
1929 			if (lines % wp->y_min_scanlines == 0)
1930 				extra_lines = wp->y_min_scanlines;
1931 			else
1932 				extra_lines = wp->y_min_scanlines * 2 -
1933 					lines % wp->y_min_scanlines;
1934 
1935 			min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
1936 								 wp->plane_blocks_per_line);
1937 		} else {
1938 			min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
1939 		}
1940 	}
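	/*
	 * With the same illustrative numbers on icl+: Y-tiled, lines = 1 and
	 * y_min_scanlines = 4 gives extra_lines = 4 * 2 - 1 = 7 and
	 * min_ddb_alloc = ceil((1 + 7) * 30.25) = 242 blocks, whereas a
	 * linear plane with blocks = 32 would get 32 + ceil(32 / 10) = 36.
	 */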
1941 
1942 	if (!skl_wm_has_lines(i915, level))
1943 		lines = 0;
1944 
1945 	if (lines > skl_wm_max_lines(i915)) {
1946 		/* reject it */
1947 		result->min_ddb_alloc = U16_MAX;
1948 		return;
1949 	}
1950 
1951 	/*
1952 	 * If lines is valid, assume we can use this watermark level
1953 	 * for now.  We'll come back and disable it after we calculate the
1954 	 * DDB allocation if it turns out we don't actually have enough
1955 	 * blocks to satisfy it.
1956 	 */
1957 	result->blocks = blocks;
1958 	result->lines = lines;
1959 	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
1960 	result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
1961 	result->enable = true;
1962 
1963 	if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
1964 		result->can_sagv = latency >= i915->display.sagv.block_time_us;
1965 }
1966 
1967 static void
1968 skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
1969 		      struct intel_plane *plane,
1970 		      const struct skl_wm_params *wm_params,
1971 		      struct skl_wm_level *levels)
1972 {
1973 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1974 	struct skl_wm_level *result_prev = &levels[0];
1975 	int level;
1976 
1977 	for (level = 0; level < i915->display.wm.num_levels; level++) {
1978 		struct skl_wm_level *result = &levels[level];
1979 		unsigned int latency = i915->display.wm.skl_latency[level];
1980 
1981 		skl_compute_plane_wm(crtc_state, plane, level, latency,
1982 				     wm_params, result_prev, result);
1983 
1984 		result_prev = result;
1985 	}
1986 }
1987 
1988 static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
1989 				struct intel_plane *plane,
1990 				const struct skl_wm_params *wm_params,
1991 				struct skl_plane_wm *plane_wm)
1992 {
1993 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1994 	struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
1995 	struct skl_wm_level *levels = plane_wm->wm;
1996 	unsigned int latency = 0;
1997 
1998 	if (i915->display.sagv.block_time_us)
1999 		latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
2000 
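	/*
	 * E.g. assuming a (hypothetical) 20 us SAGV block time and a 2 us
	 * level 0 latency, the SAGV WM0 below is computed like a regular
	 * level 0 watermark but with a 22 us latency.
	 */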
2001 	skl_compute_plane_wm(crtc_state, plane, 0, latency,
2002 			     wm_params, &levels[0],
2003 			     sagv_wm);
2004 }
2005 
2006 static void skl_compute_transition_wm(struct drm_i915_private *i915,
2007 				      struct skl_wm_level *trans_wm,
2008 				      const struct skl_wm_level *wm0,
2009 				      const struct skl_wm_params *wp)
2010 {
2011 	u16 trans_min, trans_amount, trans_y_tile_min;
2012 	u16 wm0_blocks, trans_offset, blocks;
2013 
2014 	/* Transition WMs don't make any sense if IPC is disabled */
2015 	if (!skl_watermark_ipc_enabled(i915))
2016 		return;
2017 
2018 	/*
2019 	 * WaDisableTWM:skl,kbl,cfl,bxt
2020 	 * Transition WMs are not recommended by the HW team for GEN9
2021 	 */
2022 	if (DISPLAY_VER(i915) == 9)
2023 		return;
2024 
2025 	if (DISPLAY_VER(i915) >= 11)
2026 		trans_min = 4;
2027 	else
2028 		trans_min = 14;
2029 
2030 	/* Display WA #1140: glk,cnl */
2031 	if (DISPLAY_VER(i915) == 10)
2032 		trans_amount = 0;
2033 	else
2034 		trans_amount = 10; /* This is a configurable amount */
2035 
2036 	trans_offset = trans_min + trans_amount;
2037 
2038 	/*
2039 	 * The spec asks for Selected Result Blocks for wm0 (the real value),
2040 	 * not Result Blocks (the integer value). Pay attention to the capital
2041 	 * letters. The value wm0->blocks is actually Result Blocks, but
2042 	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
2043 	 * and since we will later have to get the ceiling of the sum in the
2044 	 * transition watermarks calculation, we can just pretend Selected
2045 	 * Result Blocks is Result Blocks minus 1 and it should work for the
2046 	 * current platforms.
2047 	 */
2048 	wm0_blocks = wm0->blocks - 1;
2049 
2050 	if (wp->y_tiled) {
2051 		trans_y_tile_min =
2052 			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
2053 		blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
2054 	} else {
2055 		blocks = wm0_blocks + trans_offset;
2056 	}
2057 	blocks++;
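	/*
	 * Illustrative example (made-up numbers): on icl+ trans_offset =
	 * 4 + 10 = 14, so a non Y-tiled plane with wm0->blocks = 32 gives
	 * wm0_blocks = 31 and blocks = 31 + 14 + 1 = 46, making
	 * min_ddb_alloc below at least 47.
	 */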
2058 
2059 	/*
2060 	 * Just assume we can enable the transition watermark.  After
2061 	 * computing the DDB we'll come back and disable it if that
2062 	 * assumption turns out to be false.
2063 	 */
2064 	trans_wm->blocks = blocks;
2065 	trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
2066 	trans_wm->enable = true;
2067 }
2068 
2069 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
2070 				     const struct intel_plane_state *plane_state,
2071 				     struct intel_plane *plane, int color_plane)
2072 {
2073 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2074 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2075 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2076 	struct skl_wm_params wm_params;
2077 	int ret;
2078 
2079 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2080 					  &wm_params, color_plane);
2081 	if (ret)
2082 		return ret;
2083 
2084 	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
2085 
2086 	skl_compute_transition_wm(i915, &wm->trans_wm,
2087 				  &wm->wm[0], &wm_params);
2088 
2089 	if (DISPLAY_VER(i915) >= 12) {
2090 		tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
2091 
2092 		skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
2093 					  &wm->sagv.wm0, &wm_params);
2094 	}
2095 
2096 	return 0;
2097 }
2098 
2099 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
2100 				 const struct intel_plane_state *plane_state,
2101 				 struct intel_plane *plane)
2102 {
2103 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2104 	struct skl_wm_params wm_params;
2105 	int ret;
2106 
2107 	wm->is_planar = true;
2108 
2109 	/* uv plane watermarks must also be validated for NV12/Planar */
2110 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2111 					  &wm_params, 1);
2112 	if (ret)
2113 		return ret;
2114 
2115 	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
2116 
2117 	return 0;
2118 }
2119 
2120 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
2121 			      const struct intel_plane_state *plane_state)
2122 {
2123 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2124 	enum plane_id plane_id = plane->id;
2125 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2126 	const struct drm_framebuffer *fb = plane_state->hw.fb;
2127 	int ret;
2128 
2129 	memset(wm, 0, sizeof(*wm));
2130 
2131 	if (!intel_wm_plane_visible(crtc_state, plane_state))
2132 		return 0;
2133 
2134 	ret = skl_build_plane_wm_single(crtc_state, plane_state,
2135 					plane, 0);
2136 	if (ret)
2137 		return ret;
2138 
2139 	if (fb->format->is_yuv && fb->format->num_planes > 1) {
2140 		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
2141 					    plane);
2142 		if (ret)
2143 			return ret;
2144 	}
2145 
2146 	return 0;
2147 }
2148 
2149 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
2150 			      const struct intel_plane_state *plane_state)
2151 {
2152 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2153 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
2154 	enum plane_id plane_id = plane->id;
2155 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2156 	int ret;
2157 
2158 	/* Watermarks calculated in master */
2159 	if (plane_state->planar_slave)
2160 		return 0;
2161 
2162 	memset(wm, 0, sizeof(*wm));
2163 
2164 	if (plane_state->planar_linked_plane) {
2165 		const struct drm_framebuffer *fb = plane_state->hw.fb;
2166 
2167 		drm_WARN_ON(&i915->drm,
2168 			    !intel_wm_plane_visible(crtc_state, plane_state));
2169 		drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
2170 			    fb->format->num_planes == 1);
2171 
2172 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2173 						plane_state->planar_linked_plane, 0);
2174 		if (ret)
2175 			return ret;
2176 
2177 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2178 						plane, 1);
2179 		if (ret)
2180 			return ret;
2181 	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
2182 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2183 						plane, 0);
2184 		if (ret)
2185 			return ret;
2186 	}
2187 
2188 	return 0;
2189 }
2190 
2191 static int skl_build_pipe_wm(struct intel_atomic_state *state,
2192 			     struct intel_crtc *crtc)
2193 {
2194 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2195 	struct intel_crtc_state *crtc_state =
2196 		intel_atomic_get_new_crtc_state(state, crtc);
2197 	const struct intel_plane_state *plane_state;
2198 	struct intel_plane *plane;
2199 	int ret, i;
2200 
2201 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2202 		/*
2203 		 * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc
2204 		 * instead, but we don't populate that correctly for NV12 Y
2205 		 * planes, so for now hack this.
2206 		 */
2207 		if (plane->pipe != crtc->pipe)
2208 			continue;
2209 
2210 		if (DISPLAY_VER(i915) >= 11)
2211 			ret = icl_build_plane_wm(crtc_state, plane_state);
2212 		else
2213 			ret = skl_build_plane_wm(crtc_state, plane_state);
2214 		if (ret)
2215 			return ret;
2216 	}
2217 
2218 	crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
2219 
2220 	return 0;
2221 }
2222 
2223 static void skl_ddb_entry_write(struct drm_i915_private *i915,
2224 				i915_reg_t reg,
2225 				const struct skl_ddb_entry *entry)
2226 {
2227 	if (entry->end)
2228 		intel_de_write_fw(i915, reg,
2229 				  PLANE_BUF_END(entry->end - 1) |
2230 				  PLANE_BUF_START(entry->start));
2231 	else
2232 		intel_de_write_fw(i915, reg, 0);
2233 }
2234 
2235 static void skl_write_wm_level(struct drm_i915_private *i915,
2236 			       i915_reg_t reg,
2237 			       const struct skl_wm_level *level)
2238 {
2239 	u32 val = 0;
2240 
2241 	if (level->enable)
2242 		val |= PLANE_WM_EN;
2243 	if (level->ignore_lines)
2244 		val |= PLANE_WM_IGNORE_LINES;
2245 	val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
2246 	val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
2247 
2248 	intel_de_write_fw(i915, reg, val);
2249 }
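/*
 * For reference, an enabled level with e.g. blocks = 32 and lines = 1 is
 * written by skl_write_wm_level() above as PLANE_WM_EN |
 * REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, 32) |
 * REG_FIELD_PREP(PLANE_WM_LINES_MASK, 1) (example values only).
 */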
2250 
2251 void skl_write_plane_wm(struct intel_plane *plane,
2252 			const struct intel_crtc_state *crtc_state)
2253 {
2254 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
2255 	enum plane_id plane_id = plane->id;
2256 	enum pipe pipe = plane->pipe;
2257 	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
2258 	const struct skl_ddb_entry *ddb =
2259 		&crtc_state->wm.skl.plane_ddb[plane_id];
2260 	const struct skl_ddb_entry *ddb_y =
2261 		&crtc_state->wm.skl.plane_ddb_y[plane_id];
2262 	int level;
2263 
2264 	for (level = 0; level < i915->display.wm.num_levels; level++)
2265 		skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
2266 				   skl_plane_wm_level(pipe_wm, plane_id, level));
2267 
2268 	skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id),
2269 			   skl_plane_trans_wm(pipe_wm, plane_id));
2270 
2271 	if (HAS_HW_SAGV_WM(i915)) {
2272 		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
2273 
2274 		skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id),
2275 				   &wm->sagv.wm0);
2276 		skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
2277 				   &wm->sagv.trans_wm);
2278 	}
2279 
2280 	skl_ddb_entry_write(i915,
2281 			    PLANE_BUF_CFG(pipe, plane_id), ddb);
2282 
2283 	if (DISPLAY_VER(i915) < 11)
2284 		skl_ddb_entry_write(i915,
2285 				    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
2286 }
2287 
2288 void skl_write_cursor_wm(struct intel_plane *plane,
2289 			 const struct intel_crtc_state *crtc_state)
2290 {
2291 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
2292 	enum plane_id plane_id = plane->id;
2293 	enum pipe pipe = plane->pipe;
2294 	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
2295 	const struct skl_ddb_entry *ddb =
2296 		&crtc_state->wm.skl.plane_ddb[plane_id];
2297 	int level;
2298 
2299 	for (level = 0; level < i915->display.wm.num_levels; level++)
2300 		skl_write_wm_level(i915, CUR_WM(pipe, level),
2301 				   skl_plane_wm_level(pipe_wm, plane_id, level));
2302 
2303 	skl_write_wm_level(i915, CUR_WM_TRANS(pipe),
2304 			   skl_plane_trans_wm(pipe_wm, plane_id));
2305 
2306 	if (HAS_HW_SAGV_WM(i915)) {
2307 		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
2308 
2309 		skl_write_wm_level(i915, CUR_WM_SAGV(pipe),
2310 				   &wm->sagv.wm0);
2311 		skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe),
2312 				   &wm->sagv.trans_wm);
2313 	}
2314 
2315 	skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb);
2316 }
2317 
2318 static bool skl_wm_level_equals(const struct skl_wm_level *l1,
2319 				const struct skl_wm_level *l2)
2320 {
2321 	return l1->enable == l2->enable &&
2322 		l1->ignore_lines == l2->ignore_lines &&
2323 		l1->lines == l2->lines &&
2324 		l1->blocks == l2->blocks;
2325 }
2326 
2327 static bool skl_plane_wm_equals(struct drm_i915_private *i915,
2328 				const struct skl_plane_wm *wm1,
2329 				const struct skl_plane_wm *wm2)
2330 {
2331 	int level;
2332 
2333 	for (level = 0; level < i915->display.wm.num_levels; level++) {
2334 		/*
2335 		 * We don't check uv_wm as the hardware doesn't actually
2336 		 * use it. It only gets used for calculating the required
2337 		 * ddb allocation.
2338 		 */
2339 		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
2340 			return false;
2341 	}
2342 
2343 	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
2344 		skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
2345 		skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
2346 }
2347 
2348 static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
2349 				    const struct skl_ddb_entry *b)
2350 {
2351 	return a->start < b->end && b->start < a->end;
2352 }
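/*
 * E.g. entries [0, 512) and [256, 768) overlap (0 < 768 and 256 < 512),
 * while [0, 512) and [512, 1024) do not; start is inclusive and end
 * exclusive, matching skl_ddb_entry_write() above.
 */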
2353 
2354 static void skl_ddb_entry_union(struct skl_ddb_entry *a,
2355 				const struct skl_ddb_entry *b)
2356 {
2357 	if (a->end && b->end) {
2358 		a->start = min(a->start, b->start);
2359 		a->end = max(a->end, b->end);
2360 	} else if (b->end) {
2361 		a->start = b->start;
2362 		a->end = b->end;
2363 	}
2364 }
2365 
2366 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
2367 				 const struct skl_ddb_entry *entries,
2368 				 int num_entries, int ignore_idx)
2369 {
2370 	int i;
2371 
2372 	for (i = 0; i < num_entries; i++) {
2373 		if (i != ignore_idx &&
2374 		    skl_ddb_entries_overlap(ddb, &entries[i]))
2375 			return true;
2376 	}
2377 
2378 	return false;
2379 }
2380 
2381 static int
2382 skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
2383 			    struct intel_crtc_state *new_crtc_state)
2384 {
2385 	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
2386 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2387 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2388 	struct intel_plane *plane;
2389 
2390 	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2391 		struct intel_plane_state *plane_state;
2392 		enum plane_id plane_id = plane->id;
2393 
2394 		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
2395 					&new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
2396 		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
2397 					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
2398 			continue;
2399 
2400 		plane_state = intel_atomic_get_plane_state(state, plane);
2401 		if (IS_ERR(plane_state))
2402 			return PTR_ERR(plane_state);
2403 
2404 		new_crtc_state->update_planes |= BIT(plane_id);
2405 		new_crtc_state->async_flip_planes = 0;
2406 		new_crtc_state->do_async_flip = false;
2407 	}
2408 
2409 	return 0;
2410 }
2411 
2412 static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
2413 {
2414 	struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
2415 	u8 enabled_slices;
2416 	enum pipe pipe;
2417 
2418 	/*
2419 	 * FIXME: For now we always enable slice S1 as per
2420 	 * the Bspec display initialization sequence.
2421 	 */
2422 	enabled_slices = BIT(DBUF_S1);
2423 
2424 	for_each_pipe(i915, pipe)
2425 		enabled_slices |= dbuf_state->slices[pipe];
2426 
2427 	return enabled_slices;
2428 }
2429 
2430 static int
2431 skl_compute_ddb(struct intel_atomic_state *state)
2432 {
2433 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2434 	const struct intel_dbuf_state *old_dbuf_state;
2435 	struct intel_dbuf_state *new_dbuf_state = NULL;
2436 	const struct intel_crtc_state *old_crtc_state;
2437 	struct intel_crtc_state *new_crtc_state;
2438 	struct intel_crtc *crtc;
2439 	int ret, i;
2440 
2441 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2442 		new_dbuf_state = intel_atomic_get_dbuf_state(state);
2443 		if (IS_ERR(new_dbuf_state))
2444 			return PTR_ERR(new_dbuf_state);
2445 
2446 		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
2447 		break;
2448 	}
2449 
2450 	if (!new_dbuf_state)
2451 		return 0;
2452 
2453 	new_dbuf_state->active_pipes =
2454 		intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
2455 
2456 	if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
2457 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2458 		if (ret)
2459 			return ret;
2460 	}
2461 
2462 	if (HAS_MBUS_JOINING(i915))
2463 		new_dbuf_state->joined_mbus =
2464 			adlp_check_mbus_joined(new_dbuf_state->active_pipes);
2465 
2466 	for_each_intel_crtc(&i915->drm, crtc) {
2467 		enum pipe pipe = crtc->pipe;
2468 
2469 		new_dbuf_state->slices[pipe] =
2470 			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
2471 						new_dbuf_state->joined_mbus);
2472 
2473 		if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
2474 			continue;
2475 
2476 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2477 		if (ret)
2478 			return ret;
2479 	}
2480 
2481 	new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
2482 
2483 	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
2484 	    old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2485 		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
2486 		if (ret)
2487 			return ret;
2488 
2489 		if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2490 			/* TODO: Implement vblank synchronized MBUS joining changes */
2491 			ret = intel_modeset_all_pipes(state, "MBUS joining change");
2492 			if (ret)
2493 				return ret;
2494 		}
2495 
2496 		drm_dbg_kms(&i915->drm,
2497 			    "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
2498 			    old_dbuf_state->enabled_slices,
2499 			    new_dbuf_state->enabled_slices,
2500 			    INTEL_INFO(i915)->display.dbuf.slice_mask,
2501 			    str_yes_no(old_dbuf_state->joined_mbus),
2502 			    str_yes_no(new_dbuf_state->joined_mbus));
2503 	}
2504 
2505 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2506 		enum pipe pipe = crtc->pipe;
2507 
2508 		new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
2509 
2510 		if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
2511 			continue;
2512 
2513 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2514 		if (ret)
2515 			return ret;
2516 	}
2517 
2518 	for_each_intel_crtc(&i915->drm, crtc) {
2519 		ret = skl_crtc_allocate_ddb(state, crtc);
2520 		if (ret)
2521 			return ret;
2522 	}
2523 
2524 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
2525 					    new_crtc_state, i) {
2526 		ret = skl_crtc_allocate_plane_ddb(state, crtc);
2527 		if (ret)
2528 			return ret;
2529 
2530 		ret = skl_ddb_add_affected_planes(old_crtc_state,
2531 						  new_crtc_state);
2532 		if (ret)
2533 			return ret;
2534 	}
2535 
2536 	return 0;
2537 }
2538 
2539 static char enast(bool enable)
2540 {
2541 	return enable ? '*' : ' ';
2542 }
2543 
2544 static void
2545 skl_print_wm_changes(struct intel_atomic_state *state)
2546 {
2547 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2548 	const struct intel_crtc_state *old_crtc_state;
2549 	const struct intel_crtc_state *new_crtc_state;
2550 	struct intel_plane *plane;
2551 	struct intel_crtc *crtc;
2552 	int i;
2553 
2554 	if (!drm_debug_enabled(DRM_UT_KMS))
2555 		return;
2556 
2557 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
2558 					    new_crtc_state, i) {
2559 		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
2560 
2561 		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
2562 		new_pipe_wm = &new_crtc_state->wm.skl.optimal;
2563 
2564 		for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2565 			enum plane_id plane_id = plane->id;
2566 			const struct skl_ddb_entry *old, *new;
2567 
2568 			old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
2569 			new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
2570 
2571 			if (skl_ddb_entry_equal(old, new))
2572 				continue;
2573 
2574 			drm_dbg_kms(&i915->drm,
2575 				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
2576 				    plane->base.base.id, plane->base.name,
2577 				    old->start, old->end, new->start, new->end,
2578 				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
2579 		}
2580 
2581 		for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2582 			enum plane_id plane_id = plane->id;
2583 			const struct skl_plane_wm *old_wm, *new_wm;
2584 
2585 			old_wm = &old_pipe_wm->planes[plane_id];
2586 			new_wm = &new_pipe_wm->planes[plane_id];
2587 
2588 			if (skl_plane_wm_equals(i915, old_wm, new_wm))
2589 				continue;
2590 
2591 			drm_dbg_kms(&i915->drm,
2592 				    "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
2593 				    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
2594 				    plane->base.base.id, plane->base.name,
2595 				    enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
2596 				    enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
2597 				    enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
2598 				    enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
2599 				    enast(old_wm->trans_wm.enable),
2600 				    enast(old_wm->sagv.wm0.enable),
2601 				    enast(old_wm->sagv.trans_wm.enable),
2602 				    enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
2603 				    enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
2604 				    enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
2605 				    enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
2606 				    enast(new_wm->trans_wm.enable),
2607 				    enast(new_wm->sagv.wm0.enable),
2608 				    enast(new_wm->sagv.trans_wm.enable));
2609 
2610 			drm_dbg_kms(&i915->drm,
2611 				    "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
2612 				      " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
2613 				    plane->base.base.id, plane->base.name,
2614 				    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
2615 				    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
2616 				    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
2617 				    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
2618 				    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
2619 				    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
2620 				    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
2621 				    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
2622 				    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
2623 				    enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
2624 				    enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
2625 				    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
2626 				    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
2627 				    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
2628 				    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
2629 				    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
2630 				    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
2631 				    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
2632 				    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
2633 				    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
2634 				    enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
2635 				    enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
2636 
2637 			drm_dbg_kms(&i915->drm,
2638 				    "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2639 				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2640 				    plane->base.base.id, plane->base.name,
2641 				    old_wm->wm[0].blocks, old_wm->wm[1].blocks,
2642 				    old_wm->wm[2].blocks, old_wm->wm[3].blocks,
2643 				    old_wm->wm[4].blocks, old_wm->wm[5].blocks,
2644 				    old_wm->wm[6].blocks, old_wm->wm[7].blocks,
2645 				    old_wm->trans_wm.blocks,
2646 				    old_wm->sagv.wm0.blocks,
2647 				    old_wm->sagv.trans_wm.blocks,
2648 				    new_wm->wm[0].blocks, new_wm->wm[1].blocks,
2649 				    new_wm->wm[2].blocks, new_wm->wm[3].blocks,
2650 				    new_wm->wm[4].blocks, new_wm->wm[5].blocks,
2651 				    new_wm->wm[6].blocks, new_wm->wm[7].blocks,
2652 				    new_wm->trans_wm.blocks,
2653 				    new_wm->sagv.wm0.blocks,
2654 				    new_wm->sagv.trans_wm.blocks);
2655 
2656 			drm_dbg_kms(&i915->drm,
2657 				    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2658 				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2659 				    plane->base.base.id, plane->base.name,
2660 				    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
2661 				    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
2662 				    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
2663 				    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
2664 				    old_wm->trans_wm.min_ddb_alloc,
2665 				    old_wm->sagv.wm0.min_ddb_alloc,
2666 				    old_wm->sagv.trans_wm.min_ddb_alloc,
2667 				    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
2668 				    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
2669 				    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
2670 				    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
2671 				    new_wm->trans_wm.min_ddb_alloc,
2672 				    new_wm->sagv.wm0.min_ddb_alloc,
2673 				    new_wm->sagv.trans_wm.min_ddb_alloc);
2674 		}
2675 	}
2676 }
2677 
2678 static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
2679 					 const struct skl_pipe_wm *old_pipe_wm,
2680 					 const struct skl_pipe_wm *new_pipe_wm)
2681 {
2682 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
2683 	int level;
2684 
2685 	for (level = 0; level < i915->display.wm.num_levels; level++) {
2686 		/*
2687 		 * We don't check uv_wm as the hardware doesn't actually
2688 		 * use it. It only gets used for calculating the required
2689 		 * ddb allocation.
2690 		 */
2691 		if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
2692 					 skl_plane_wm_level(new_pipe_wm, plane->id, level)))
2693 			return false;
2694 	}
2695 
2696 	if (HAS_HW_SAGV_WM(i915)) {
2697 		const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
2698 		const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
2699 
2700 		if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
2701 		    !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
2702 			return false;
2703 	}
2704 
2705 	return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
2706 				   skl_plane_trans_wm(new_pipe_wm, plane->id));
2707 }
2708 
2709 /*
2710  * To make sure the cursor watermark registers are always consistent
2711  * with our computed state the following scenario needs special
2712  * treatment:
2713  *
2714  * 1. enable cursor
2715  * 2. move cursor entirely offscreen
2716  * 3. disable cursor
2717  *
2718  * Step 2. does call .disable_plane() but does not zero the watermarks
2719  * (since we consider an offscreen cursor still active for the purposes
2720  * of watermarks). Step 3. would not normally call .disable_plane()
2721  * because the actual plane visibility isn't changing, and we don't
2722  * deallocate the cursor ddb until the pipe gets disabled. So we must
2723  * force step 3. to call .disable_plane() to update the watermark
2724  * registers properly.
2725  *
2726  * Other planes do not suffer from this issue as their watermarks are
2727  * calculated based on the actual plane visibility. The only time this
2728  * can trigger for the other planes is during the initial readout as the
2729  * default value of the watermarks registers is not zero.
2730  */
2731 static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
2732 				      struct intel_crtc *crtc)
2733 {
2734 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2735 	const struct intel_crtc_state *old_crtc_state =
2736 		intel_atomic_get_old_crtc_state(state, crtc);
2737 	struct intel_crtc_state *new_crtc_state =
2738 		intel_atomic_get_new_crtc_state(state, crtc);
2739 	struct intel_plane *plane;
2740 
2741 	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2742 		struct intel_plane_state *plane_state;
2743 		enum plane_id plane_id = plane->id;
2744 
2745 		/*
2746 		 * Force a full wm update for every plane on modeset.
2747 		 * Required because the reset value of the wm registers
2748 		 * is non-zero, whereas we want all disabled planes to
2749 		 * have zero watermarks. So if we turn off the relevant
2750 		 * power well the hardware state will go out of sync
2751 		 * with the software state.
2752 		 */
2753 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
2754 		    skl_plane_selected_wm_equals(plane,
2755 						 &old_crtc_state->wm.skl.optimal,
2756 						 &new_crtc_state->wm.skl.optimal))
2757 			continue;
2758 
2759 		plane_state = intel_atomic_get_plane_state(state, plane);
2760 		if (IS_ERR(plane_state))
2761 			return PTR_ERR(plane_state);
2762 
2763 		new_crtc_state->update_planes |= BIT(plane_id);
2764 		new_crtc_state->async_flip_planes = 0;
2765 		new_crtc_state->do_async_flip = false;
2766 	}
2767 
2768 	return 0;
2769 }
2770 
2771 static int
2772 skl_compute_wm(struct intel_atomic_state *state)
2773 {
2774 	struct intel_crtc *crtc;
2775 	struct intel_crtc_state *new_crtc_state;
2776 	int ret, i;
2777 
2778 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2779 		ret = skl_build_pipe_wm(state, crtc);
2780 		if (ret)
2781 			return ret;
2782 	}
2783 
2784 	ret = skl_compute_ddb(state);
2785 	if (ret)
2786 		return ret;
2787 
2788 	ret = intel_compute_sagv_mask(state);
2789 	if (ret)
2790 		return ret;
2791 
2792 	/*
2793 	 * skl_compute_ddb() will have adjusted the final watermarks
2794 	 * based on how much ddb is available. Now we can actually
2795 	 * check if the final watermarks changed.
2796 	 */
2797 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2798 		ret = skl_wm_add_affected_planes(state, crtc);
2799 		if (ret)
2800 			return ret;
2801 	}
2802 
2803 	skl_print_wm_changes(state);
2804 
2805 	return 0;
2806 }
2807 
2808 static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
2809 {
2810 	level->enable = val & PLANE_WM_EN;
2811 	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
2812 	level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
2813 	level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
2814 }
2815 
2816 static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
2817 				     struct skl_pipe_wm *out)
2818 {
2819 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2820 	enum pipe pipe = crtc->pipe;
2821 	enum plane_id plane_id;
2822 	int level;
2823 	u32 val;
2824 
2825 	for_each_plane_id_on_crtc(crtc, plane_id) {
2826 		struct skl_plane_wm *wm = &out->planes[plane_id];
2827 
2828 		for (level = 0; level < i915->display.wm.num_levels; level++) {
2829 			if (plane_id != PLANE_CURSOR)
2830 				val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
2831 			else
2832 				val = intel_de_read(i915, CUR_WM(pipe, level));
2833 
2834 			skl_wm_level_from_reg_val(val, &wm->wm[level]);
2835 		}
2836 
2837 		if (plane_id != PLANE_CURSOR)
2838 			val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
2839 		else
2840 			val = intel_de_read(i915, CUR_WM_TRANS(pipe));
2841 
2842 		skl_wm_level_from_reg_val(val, &wm->trans_wm);
2843 
2844 		if (HAS_HW_SAGV_WM(i915)) {
2845 			if (plane_id != PLANE_CURSOR)
2846 				val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
2847 			else
2848 				val = intel_de_read(i915, CUR_WM_SAGV(pipe));
2849 
2850 			skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
2851 
2852 			if (plane_id != PLANE_CURSOR)
2853 				val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
2854 			else
2855 				val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));
2856 
2857 			skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
2858 		} else if (DISPLAY_VER(i915) >= 12) {
2859 			wm->sagv.wm0 = wm->wm[0];
2860 			wm->sagv.trans_wm = wm->trans_wm;
2861 		}
2862 	}
2863 }
2864 
2865 static void skl_wm_get_hw_state(struct drm_i915_private *i915)
2866 {
2867 	struct intel_dbuf_state *dbuf_state =
2868 		to_intel_dbuf_state(i915->display.dbuf.obj.state);
2869 	struct intel_crtc *crtc;
2870 
2871 	if (HAS_MBUS_JOINING(i915))
2872 		dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
2873 
2874 	for_each_intel_crtc(&i915->drm, crtc) {
2875 		struct intel_crtc_state *crtc_state =
2876 			to_intel_crtc_state(crtc->base.state);
2877 		enum pipe pipe = crtc->pipe;
2878 		unsigned int mbus_offset;
2879 		enum plane_id plane_id;
2880 		u8 slices;
2881 
2882 		memset(&crtc_state->wm.skl.optimal, 0,
2883 		       sizeof(crtc_state->wm.skl.optimal));
2884 		if (crtc_state->hw.active)
2885 			skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
2886 		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
2887 
2888 		memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
2889 
2890 		for_each_plane_id_on_crtc(crtc, plane_id) {
2891 			struct skl_ddb_entry *ddb =
2892 				&crtc_state->wm.skl.plane_ddb[plane_id];
2893 			struct skl_ddb_entry *ddb_y =
2894 				&crtc_state->wm.skl.plane_ddb_y[plane_id];
2895 
2896 			if (!crtc_state->hw.active)
2897 				continue;
2898 
2899 			skl_ddb_get_hw_plane_state(i915, crtc->pipe,
2900 						   plane_id, ddb, ddb_y);
2901 
2902 			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
2903 			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
2904 		}
2905 
2906 		dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
2907 
2908 		/*
2909 		 * Used for checking overlaps, so we need absolute
2910 		 * offsets instead of MBUS relative offsets.
2911 		 */
2912 		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
2913 						 dbuf_state->joined_mbus);
2914 		mbus_offset = mbus_ddb_offset(i915, slices);
2915 		crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
2916 		crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
2917 
2918 		/* The slices actually used by the planes on the pipe */
2919 		dbuf_state->slices[pipe] =
2920 			skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
2921 
2922 		drm_dbg_kms(&i915->drm,
2923 			    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
2924 			    crtc->base.base.id, crtc->base.name,
2925 			    dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
2926 			    dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
2927 			    str_yes_no(dbuf_state->joined_mbus));
2928 	}
2929 
2930 	dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
2931 }
2932 
2933 static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
2934 {
2935 	const struct intel_dbuf_state *dbuf_state =
2936 		to_intel_dbuf_state(i915->display.dbuf.obj.state);
2937 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
2938 	struct intel_crtc *crtc;
2939 
2940 	for_each_intel_crtc(&i915->drm, crtc) {
2941 		const struct intel_crtc_state *crtc_state =
2942 			to_intel_crtc_state(crtc->base.state);
2943 
2944 		entries[crtc->pipe] = crtc_state->wm.skl.ddb;
2945 	}
2946 
2947 	for_each_intel_crtc(&i915->drm, crtc) {
2948 		const struct intel_crtc_state *crtc_state =
2949 			to_intel_crtc_state(crtc->base.state);
2950 		u8 slices;
2951 
2952 		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
2953 						 dbuf_state->joined_mbus);
2954 		if (dbuf_state->slices[crtc->pipe] & ~slices)
2955 			return true;
2956 
2957 		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
2958 						I915_MAX_PIPES, crtc->pipe))
2959 			return true;
2960 	}
2961 
2962 	return false;
2963 }
2964 
2965 static void skl_wm_sanitize(struct drm_i915_private *i915)
2966 {
2967 	struct intel_crtc *crtc;
2968 
2969 	/*
2970 	 * On TGL/RKL (at least) the BIOS likes to assign the planes
2971 	 * to the wrong DBUF slices. This will cause an infinite loop
2972 	 * in skl_commit_modeset_enables() as it can't find a way to
2973 	 * transition between the old bogus DBUF layout to the new
2974 	 * proper DBUF layout without DBUF allocation overlaps between
2975 	 * the planes (which cannot be allowed or else the hardware
2976 	 * may hang). If we detect a bogus DBUF layout just turn off
2977 	 * all the planes so that skl_commit_modeset_enables() can
2978 	 * simply ignore them.
2979 	 */
2980 	if (!skl_dbuf_is_misconfigured(i915))
2981 		return;
2982 
2983 	drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
2984 
2985 	for_each_intel_crtc(&i915->drm, crtc) {
2986 		struct intel_plane *plane = to_intel_plane(crtc->base.primary);
2987 		const struct intel_plane_state *plane_state =
2988 			to_intel_plane_state(plane->base.state);
2989 		struct intel_crtc_state *crtc_state =
2990 			to_intel_crtc_state(crtc->base.state);
2991 
2992 		if (plane_state->uapi.visible)
2993 			intel_plane_disable_noatomic(crtc, plane);
2994 
2995 		drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
2996 
2997 		memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
2998 	}
2999 }
3000 
3001 static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
3002 {
3003 	skl_wm_get_hw_state(i915);
3004 	skl_wm_sanitize(i915);
3005 }
3006 
3007 void intel_wm_state_verify(struct intel_crtc *crtc,
3008 			   struct intel_crtc_state *new_crtc_state)
3009 {
3010 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3011 	struct skl_hw_state {
3012 		struct skl_ddb_entry ddb[I915_MAX_PLANES];
3013 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
3014 		struct skl_pipe_wm wm;
3015 	} *hw;
3016 	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
3017 	struct intel_plane *plane;
3018 	u8 hw_enabled_slices;
3019 	int level;
3020 
3021 	if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
3022 		return;
3023 
3024 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3025 	if (!hw)
3026 		return;
3027 
3028 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
3029 
3030 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
3031 
3032 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
3033 
3034 	if (DISPLAY_VER(i915) >= 11 &&
3035 	    hw_enabled_slices != i915->display.dbuf.enabled_slices)
3036 		drm_err(&i915->drm,
3037 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
3038 			i915->display.dbuf.enabled_slices,
3039 			hw_enabled_slices);
3040 
3041 	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
3042 		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
3043 		const struct skl_wm_level *hw_wm_level, *sw_wm_level;
3044 
3045 		/* Watermarks */
3046 		for (level = 0; level < i915->display.wm.num_levels; level++) {
3047 			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
3048 			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
3049 
3050 			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
3051 				continue;
3052 
3053 			drm_err(&i915->drm,
3054 				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3055 				plane->base.base.id, plane->base.name, level,
3056 				sw_wm_level->enable,
3057 				sw_wm_level->blocks,
3058 				sw_wm_level->lines,
3059 				hw_wm_level->enable,
3060 				hw_wm_level->blocks,
3061 				hw_wm_level->lines);
3062 		}
3063 
3064 		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
3065 		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
3066 
3067 		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3068 			drm_err(&i915->drm,
3069 				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3070 				plane->base.base.id, plane->base.name,
3071 				sw_wm_level->enable,
3072 				sw_wm_level->blocks,
3073 				sw_wm_level->lines,
3074 				hw_wm_level->enable,
3075 				hw_wm_level->blocks,
3076 				hw_wm_level->lines);
3077 		}
3078 
3079 		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
3080 		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
3081 
3082 		if (HAS_HW_SAGV_WM(i915) &&
3083 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3084 			drm_err(&i915->drm,
3085 				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3086 				plane->base.base.id, plane->base.name,
3087 				sw_wm_level->enable,
3088 				sw_wm_level->blocks,
3089 				sw_wm_level->lines,
3090 				hw_wm_level->enable,
3091 				hw_wm_level->blocks,
3092 				hw_wm_level->lines);
3093 		}
3094 
3095 		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
3096 		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
3097 
3098 		if (HAS_HW_SAGV_WM(i915) &&
3099 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3100 			drm_err(&i915->drm,
3101 				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3102 				plane->base.base.id, plane->base.name,
3103 				sw_wm_level->enable,
3104 				sw_wm_level->blocks,
3105 				sw_wm_level->lines,
3106 				hw_wm_level->enable,
3107 				hw_wm_level->blocks,
3108 				hw_wm_level->lines);
3109 		}
3110 
3111 		/* DDB */
3112 		hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
3113 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
3114 
3115 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
3116 			drm_err(&i915->drm,
3117 				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
3118 				plane->base.base.id, plane->base.name,
3119 				sw_ddb_entry->start, sw_ddb_entry->end,
3120 				hw_ddb_entry->start, hw_ddb_entry->end);
3121 		}
3122 	}
3123 
3124 	kfree(hw);
3125 }
3126 
3127 bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
3128 {
3129 	return i915->display.wm.ipc_enabled;
3130 }
3131 
3132 void skl_watermark_ipc_update(struct drm_i915_private *i915)
3133 {
3134 	if (!HAS_IPC(i915))
3135 		return;
3136 
3137 	intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
3138 		     skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
3139 }
3140 
3141 static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
3142 {
3143 	/* Display WA #0477 WaDisableIPC: skl */
3144 	if (IS_SKYLAKE(i915))
3145 		return false;
3146 
3147 	/* Display WA #1141: SKL:all KBL:all CFL */
3148 	if (IS_KABYLAKE(i915) ||
3149 	    IS_COFFEELAKE(i915) ||
3150 	    IS_COMETLAKE(i915))
3151 		return i915->dram_info.symmetric_memory;
3152 
3153 	return true;
3154 }
3155 
3156 void skl_watermark_ipc_init(struct drm_i915_private *i915)
3157 {
3158 	if (!HAS_IPC(i915))
3159 		return;
3160 
3161 	i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
3162 
3163 	skl_watermark_ipc_update(i915);
3164 }
3165 
3166 static void
3167 adjust_wm_latency(struct drm_i915_private *i915,
3168 		  u16 wm[], int num_levels, int read_latency)
3169 {
3170 	bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
3171 	int i, level;
3172 
3173 	/*
3174 	 * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
3175 	 * need to be disabled. We make sure to sanitize the values out
3176 	 * of the punit to satisfy this requirement.
3177 	 */
3178 	for (level = 1; level < num_levels; level++) {
3179 		if (wm[level] == 0) {
3180 			for (i = level + 1; i < num_levels; i++)
3181 				wm[i] = 0;
3182 
3183 			num_levels = level;
3184 			break;
3185 		}
3186 	}
3187 
3188 	/*
3189 	 * WaWmMemoryReadLatency
3190 	 *
3191 	 * The punit doesn't take the read latency into account, so we need
3192 	 * to add a proper adjustment to each valid level we retrieve from
3193 	 * the punit when the level 0 response data is 0us.
3194 	 */
3195 	if (wm[0] == 0) {
3196 		for (level = 0; level < num_levels; level++)
3197 			wm[level] += read_latency;
3198 	}
3199 
3200 	/*
3201 	 * WA Level-0 adjustment for 16GB DIMMs: SKL+
3202 	 * If we could not get the DIMM info, assume 16GB DIMMs and
3203 	 * enable this WA to prevent any underrun caused by the missing
3204 	 * information.
3205 	 */
3206 	if (wm_lv_0_adjust_needed)
3207 		wm[0] += 1;
3208 }
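/*
 * Worked example for adjust_wm_latency() with made-up latencies: wm =
 * {0, 2, 4, 0, 8, ...} with read_latency = 2 and the 16GB DIMM adjustment
 * needed first zeroes everything from the first zero level up
 * ({0, 2, 4, 0, 0, ...}), then adds the read latency to the remaining
 * levels since wm[0] == 0 ({2, 4, 6, 0, 0, ...}), and finally bumps
 * level 0 by one ({3, 4, 6, 0, 0, ...}).
 */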
3209 
3210 static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
3211 {
3212 	int num_levels = i915->display.wm.num_levels;
3213 	u32 val;
3214 
3215 	val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
3216 	wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3217 	wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3218 
3219 	val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
3220 	wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3221 	wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3222 
3223 	val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
3224 	wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3225 	wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3226 
3227 	adjust_wm_latency(i915, wm, num_levels, 6);
3228 }
3229 
3230 static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
3231 {
3232 	int num_levels = i915->display.wm.num_levels;
3233 	int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
3234 	int mult = IS_DG2(i915) ? 2 : 1;
3235 	u32 val;
3236 	int ret;
3237 
3238 	/* read the first set of memory latencies[0:3] */
3239 	val = 0; /* data0 to be programmed to 0 for first set */
3240 	ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
3241 	if (ret) {
3242 		drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
3243 		return;
3244 	}
3245 
3246 	wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
3247 	wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
3248 	wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
3249 	wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
3250 
3251 	/* read the second set of memory latencies[4:7] */
3252 	val = 1; /* data0 to be programmed to 1 for second set */
3253 	ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
3254 	if (ret) {
3255 		drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
3256 		return;
3257 	}
3258 
3259 	wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
3260 	wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
3261 	wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
3262 	wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
3263 
3264 	adjust_wm_latency(i915, wm, num_levels, read_latency);
3265 }
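/*
 * Each GEN9_PCODE_READ_MEM_LATENCY mailbox read above returns four packed
 * latency fields, so the two reads cover WM levels 0-3 and 4-7; the DG2
 * doubling presumably reflects pcode reporting latencies in 2 us units
 * there.
 */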
3266 
3267 static void skl_setup_wm_latency(struct drm_i915_private *i915)
3268 {
3269 	if (HAS_HW_SAGV_WM(i915))
3270 		i915->display.wm.num_levels = 6;
3271 	else
3272 		i915->display.wm.num_levels = 8;
3273 
3274 	if (DISPLAY_VER(i915) >= 14)
3275 		mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
3276 	else
3277 		skl_read_wm_latency(i915, i915->display.wm.skl_latency);
3278 
3279 	intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
3280 }
3281 
3282 static const struct intel_wm_funcs skl_wm_funcs = {
3283 	.compute_global_watermarks = skl_compute_wm,
3284 	.get_hw_state = skl_wm_get_hw_state_and_sanitize,
3285 };
3286 
3287 void skl_wm_init(struct drm_i915_private *i915)
3288 {
3289 	intel_sagv_init(i915);
3290 
3291 	skl_setup_wm_latency(i915);
3292 
3293 	i915->display.funcs.wm = &skl_wm_funcs;
3294 }
3295 
3296 static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
3297 {
3298 	struct intel_dbuf_state *dbuf_state;
3299 
3300 	dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
3301 	if (!dbuf_state)
3302 		return NULL;
3303 
3304 	return &dbuf_state->base;
3305 }
3306 
3307 static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
3308 				     struct intel_global_state *state)
3309 {
3310 	kfree(state);
3311 }
3312 
3313 static const struct intel_global_state_funcs intel_dbuf_funcs = {
3314 	.atomic_duplicate_state = intel_dbuf_duplicate_state,
3315 	.atomic_destroy_state = intel_dbuf_destroy_state,
3316 };
3317 
3318 struct intel_dbuf_state *
3319 intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
3320 {
3321 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3322 	struct intel_global_state *dbuf_state;
3323 
3324 	dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
3325 	if (IS_ERR(dbuf_state))
3326 		return ERR_CAST(dbuf_state);
3327 
3328 	return to_intel_dbuf_state(dbuf_state);
3329 }
3330 
3331 int intel_dbuf_init(struct drm_i915_private *i915)
3332 {
3333 	struct intel_dbuf_state *dbuf_state;
3334 
3335 	dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
3336 	if (!dbuf_state)
3337 		return -ENOMEM;
3338 
3339 	intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
3340 				     &dbuf_state->base, &intel_dbuf_funcs);
3341 
3342 	return 0;
3343 }
3344 
3345 /*
3346  * Configure MBUS_CTL and the DBUF_CTL_S of each slice to the join_mbus state
3347  * before updating the request state of all DBUF slices.
3348  */
3349 static void update_mbus_pre_enable(struct intel_atomic_state *state)
3350 {
3351 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3352 	u32 mbus_ctl, dbuf_min_tracker_val;
3353 	enum dbuf_slice slice;
3354 	const struct intel_dbuf_state *dbuf_state =
3355 		intel_atomic_get_new_dbuf_state(state);
3356 
3357 	if (!HAS_MBUS_JOINING(i915))
3358 		return;
3359 
3360 	/*
3361 	 * TODO: Implement vblank synchronized MBUS joining changes.
3362 	 * Must be properly coordinated with dbuf reprogramming.
3363 	 */
3364 	if (dbuf_state->joined_mbus) {
3365 		mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
3366 			MBUS_JOIN_PIPE_SELECT_NONE;
3367 		dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
3368 	} else {
3369 		mbus_ctl = MBUS_HASHING_MODE_2x2 |
3370 			MBUS_JOIN_PIPE_SELECT_NONE;
3371 		dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
3372 	}
3373 
3374 	intel_de_rmw(i915, MBUS_CTL,
3375 		     MBUS_HASHING_MODE_MASK | MBUS_JOIN |
3376 		     MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
3377 
3378 	for_each_dbuf_slice(i915, slice)
3379 		intel_de_rmw(i915, DBUF_CTL_S(slice),
3380 			     DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
3381 			     dbuf_min_tracker_val);
3382 }
3383 
3384 void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
3385 {
3386 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3387 	const struct intel_dbuf_state *new_dbuf_state =
3388 		intel_atomic_get_new_dbuf_state(state);
3389 	const struct intel_dbuf_state *old_dbuf_state =
3390 		intel_atomic_get_old_dbuf_state(state);
3391 
3392 	if (!new_dbuf_state ||
3393 	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
3394 	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
3395 		return;
3396 
3397 	WARN_ON(!new_dbuf_state->base.changed);
3398 
3399 	update_mbus_pre_enable(state);
3400 	gen9_dbuf_slices_update(i915,
3401 				old_dbuf_state->enabled_slices |
3402 				new_dbuf_state->enabled_slices);
3403 }
3404 
3405 void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
3406 {
3407 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3408 	const struct intel_dbuf_state *new_dbuf_state =
3409 		intel_atomic_get_new_dbuf_state(state);
3410 	const struct intel_dbuf_state *old_dbuf_state =
3411 		intel_atomic_get_old_dbuf_state(state);
3412 
3413 	if (!new_dbuf_state ||
3414 	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
3415 	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
3416 		return;
3417 
3418 	WARN_ON(!new_dbuf_state->base.changed);
3419 
3420 	gen9_dbuf_slices_update(i915,
3421 				new_dbuf_state->enabled_slices);
3422 }
3423 
3424 static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
3425 {
3426 	switch (pipe) {
3427 	case PIPE_A:
3428 		return !(active_pipes & BIT(PIPE_D));
3429 	case PIPE_D:
3430 		return !(active_pipes & BIT(PIPE_A));
3431 	case PIPE_B:
3432 		return !(active_pipes & BIT(PIPE_C));
3433 	case PIPE_C:
3434 		return !(active_pipes & BIT(PIPE_B));
3435 	default: /* to suppress compiler warning */
3436 		MISSING_CASE(pipe);
3437 		break;
3438 	}
3439 
3440 	return false;
3441 }
3442 
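/*
 * Program the per-pipe MBUS DBOX credits. The credit values depend on the
 * platform generation and on whether MBUS joining is enabled; the result is
 * written to PIPE_MBUS_DBOX_CTL for every active pipe in the state.
 */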
3443 void intel_mbus_dbox_update(struct intel_atomic_state *state)
3444 {
3445 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3446 	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
3447 	const struct intel_crtc_state *new_crtc_state;
3448 	const struct intel_crtc *crtc;
3449 	u32 val = 0;
3450 	int i;
3451 
3452 	if (DISPLAY_VER(i915) < 11)
3453 		return;
3454 
3455 	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
3456 	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
3457 	if (!new_dbuf_state ||
3458 	    (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
3459 	     new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
3460 		return;
3461 
3462 	if (DISPLAY_VER(i915) >= 14)
3463 		val |= MBUS_DBOX_I_CREDIT(2);
3464 
3465 	if (DISPLAY_VER(i915) >= 12) {
3466 		val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
3467 		val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
3468 		val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
3469 	}
3470 
3471 	if (DISPLAY_VER(i915) >= 14)
3472 		val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
3473 						     MBUS_DBOX_A_CREDIT(8);
3474 	else if (IS_ALDERLAKE_P(i915))
3475 		/* Wa_22010947358:adl-p */
3476 		val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
3477 						     MBUS_DBOX_A_CREDIT(4);
3478 	else
3479 		val |= MBUS_DBOX_A_CREDIT(2);
3480 
3481 	if (DISPLAY_VER(i915) >= 14) {
3482 		val |= MBUS_DBOX_B_CREDIT(0xA);
3483 	} else if (IS_ALDERLAKE_P(i915)) {
3484 		val |= MBUS_DBOX_BW_CREDIT(2);
3485 		val |= MBUS_DBOX_B_CREDIT(8);
3486 	} else if (DISPLAY_VER(i915) >= 12) {
3487 		val |= MBUS_DBOX_BW_CREDIT(2);
3488 		val |= MBUS_DBOX_B_CREDIT(12);
3489 	} else {
3490 		val |= MBUS_DBOX_BW_CREDIT(1);
3491 		val |= MBUS_DBOX_B_CREDIT(8);
3492 	}
3493 
3494 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
3495 		u32 pipe_val = val;
3496 
3497 		if (!new_crtc_state->hw.active)
3498 			continue;
3499 
3500 		if (DISPLAY_VER(i915) >= 14) {
3501 			if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
3502 							      new_dbuf_state->active_pipes))
3503 				pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
3504 			else
3505 				pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
3506 		}
3507 
3508 		intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
3509 	}
3510 }
3511 
3512 static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
3513 {
3514 	struct drm_i915_private *i915 = m->private;
3515 
3516 	seq_printf(m, "Isochronous Priority Control: %s\n",
3517 		   str_yes_no(skl_watermark_ipc_enabled(i915)));
3518 	return 0;
3519 }
3520 
3521 static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
3522 {
3523 	struct drm_i915_private *i915 = inode->i_private;
3524 
3525 	return single_open(file, skl_watermark_ipc_status_show, i915);
3526 }
3527 
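/*
 * Parse a boolean from userspace and enable/disable IPC accordingly, holding
 * a runtime PM wakeref while the hardware is reprogrammed.
 */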
3528 static ssize_t skl_watermark_ipc_status_write(struct file *file,
3529 					      const char __user *ubuf,
3530 					      size_t len, loff_t *offp)
3531 {
3532 	struct seq_file *m = file->private_data;
3533 	struct drm_i915_private *i915 = m->private;
3534 	intel_wakeref_t wakeref;
3535 	bool enable;
3536 	int ret;
3537 
3538 	ret = kstrtobool_from_user(ubuf, len, &enable);
3539 	if (ret < 0)
3540 		return ret;
3541 
3542 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
3543 		if (!skl_watermark_ipc_enabled(i915) && enable)
3544 			drm_info(&i915->drm,
3545 				 "Enabling IPC: WM will be proper only after next commit\n");
3546 		i915->display.wm.ipc_enabled = enable;
3547 		skl_watermark_ipc_update(i915);
3548 	}
3549 
3550 	return len;
3551 }
3552 
3553 static const struct file_operations skl_watermark_ipc_status_fops = {
3554 	.owner = THIS_MODULE,
3555 	.open = skl_watermark_ipc_status_open,
3556 	.read = seq_read,
3557 	.llseek = seq_lseek,
3558 	.release = single_release,
3559 	.write = skl_watermark_ipc_status_write
3560 };
3561 
3562 static int intel_sagv_status_show(struct seq_file *m, void *unused)
3563 {
3564 	struct drm_i915_private *i915 = m->private;
3565 	static const char * const sagv_status[] = {
3566 		[I915_SAGV_UNKNOWN] = "unknown",
3567 		[I915_SAGV_DISABLED] = "disabled",
3568 		[I915_SAGV_ENABLED] = "enabled",
3569 		[I915_SAGV_NOT_CONTROLLED] = "not controlled",
3570 	};
3571 
3572 	seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
3573 	seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
3574 	seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
3575 
3576 	return 0;
3577 }
3578 
3579 DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
3580 
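/*
 * Expose the IPC (writable) and SAGV (read-only) status files under the
 * primary DRM minor's debugfs directory, when the respective feature exists.
 */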
3581 void skl_watermark_debugfs_register(struct drm_i915_private *i915)
3582 {
3583 	struct drm_minor *minor = i915->drm.primary;
3584 
3585 	if (HAS_IPC(i915))
3586 		debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
3587 				    &skl_watermark_ipc_status_fops);
3588 
3589 	if (HAS_SAGV(i915))
3590 		debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
3591 				    &intel_sagv_status_fops);
3592 }
3593