1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /**
25  * DOC: Frame Buffer Compression (FBC)
26  *
27  * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
30  *
31  * The benefits of FBC are mostly visible with solid backgrounds and
32  * variation-less patterns. It comes from keeping the memory footprint small
33  * and having fewer memory pages opened and accessed for refreshing the display.
34  *
 * i915 is responsible for reserving stolen memory for FBC and configuring
 * its offset in the appropriate registers. The hardware takes care of all
37  * compress/decompress. However there are many known cases where we have to
38  * forcibly disable it to allow proper screen updates.
39  */
40 
41 #include <drm/drm_fourcc.h>
42 
43 #include "i915_drv.h"
44 #include "intel_drv.h"
45 #include "intel_fbc.h"
46 #include "intel_frontbuffer.h"
47 
/* Does this device have FBC hardware at all? */
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	return HAS_FBC(dev_priv);
}
52 
/* Gen2/3 hardware can't keep FBC enabled while multiple pipes are active. */
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) <= 3;
}
57 
58 /*
59  * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
60  * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
61  * origin so the x and y offsets can actually fit the registers. As a
62  * consequence, the fence doesn't really start exactly at the display plane
63  * address we program because it starts at the real start of the buffer, so we
64  * have to take this into consideration here.
65  */
66 static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
67 {
68 	return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
69 }
70 
71 /*
72  * For SKL+, the plane source size used by the hardware is based on the value we
73  * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
74  * we wrote to PIPESRC.
75  */
76 static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
77 					    int *width, int *height)
78 {
79 	if (width)
80 		*width = cache->plane.src_w;
81 	if (height)
82 		*height = cache->plane.src_h;
83 }
84 
85 static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
86 					struct intel_fbc_state_cache *cache)
87 {
88 	int lines;
89 
90 	intel_fbc_get_plane_source_size(cache, NULL, &lines);
91 	if (IS_GEN(dev_priv, 7))
92 		lines = min(lines, 2048);
93 	else if (INTEL_GEN(dev_priv) >= 8)
94 		lines = min(lines, 2560);
95 
96 	/* Hardware needs the full buffer stride, not just the active area. */
97 	return lines * cache->fb.stride;
98 }
99 
100 static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
101 {
102 	u32 fbc_ctl;
103 
104 	/* Disable compression */
105 	fbc_ctl = I915_READ(FBC_CONTROL);
106 	if ((fbc_ctl & FBC_CTL_EN) == 0)
107 		return;
108 
109 	fbc_ctl &= ~FBC_CTL_EN;
110 	I915_WRITE(FBC_CONTROL, fbc_ctl);
111 
112 	/* Wait for compressing bit to clear */
113 	if (intel_wait_for_register(&dev_priv->uncore,
114 				    FBC_STATUS, FBC_STAT_COMPRESSING, 0,
115 				    10)) {
116 		DRM_DEBUG_KMS("FBC idle timed out\n");
117 		return;
118 	}
119 }
120 
/*
 * Program and enable FBC on pre-G4X (8xx/9xx) hardware. The register
 * write order matters: tags are cleared and FBC_CONTROL2/FBC_FENCE_OFF
 * are set up before FBC_CONTROL finally enables compression.
 */
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN(dev_priv, 2))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN(dev_priv, 4)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	/* Preserve the interval field, clear everything else. */
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= params->vma->fence->id;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}
163 
164 static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
165 {
166 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
167 }
168 
/* Program and enable FBC (DPFC) on G4X (GM45) hardware. */
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
	/* 16bpp framebuffers use the 2x compression limit. */
	if (params->fb.format->cpp[0] == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;

	if (params->flags & PLANE_HAS_FENCE) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(DPFC_FENCE_YOFF, 0);
	}

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}
190 
191 static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
192 {
193 	u32 dpfc_ctl;
194 
195 	/* Disable compression */
196 	dpfc_ctl = I915_READ(DPFC_CONTROL);
197 	if (dpfc_ctl & DPFC_CTL_EN) {
198 		dpfc_ctl &= ~DPFC_CTL_EN;
199 		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
200 	}
201 }
202 
203 static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
204 {
205 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
206 }
207 
/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	/* Posting read flushes the nuke write to the hardware. */
	POSTING_READ(MSG_FBC_REND_STATE);
}
214 
/*
 * Program and enable FBC on ILK/SNB hardware, then force a
 * recompression pass. SNB additionally programs the CPU fence in
 * SNB_DPFC_CTL_SA; ILK carries the fence id in the control register.
 */
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
	/* 16bpp needs one extra compression-limit step. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->flags & PLANE_HAS_FENCE) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		if (IS_GEN(dev_priv, 5))
			dpfc_ctl |= params->vma->fence->id;
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA,
				   SNB_CPU_FENCE_ENABLE |
				   params->vma->fence->id);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
				   params->crtc.fence_y_offset);
		}
	} else {
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA, 0);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	I915_WRITE(ILK_FBC_RT_BASE,
		   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}
264 
265 static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
266 {
267 	u32 dpfc_ctl;
268 
269 	/* Disable compression */
270 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
271 	if (dpfc_ctl & DPFC_CTL_EN) {
272 		dpfc_ctl &= ~DPFC_CTL_EN;
273 		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
274 	}
275 }
276 
277 static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
278 {
279 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
280 }
281 
282 static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
283 {
284 	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
285 	u32 dpfc_ctl;
286 	int threshold = dev_priv->fbc.threshold;
287 
288 	/* Display WA #0529: skl, kbl, bxt. */
289 	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
290 		u32 val = I915_READ(CHICKEN_MISC_4);
291 
292 		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
293 
294 		if (i915_gem_object_get_tiling(params->vma->obj) !=
295 		    I915_TILING_X)
296 			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
297 
298 		I915_WRITE(CHICKEN_MISC_4, val);
299 	}
300 
301 	dpfc_ctl = 0;
302 	if (IS_IVYBRIDGE(dev_priv))
303 		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
304 
305 	if (params->fb.format->cpp[0] == 2)
306 		threshold++;
307 
308 	switch (threshold) {
309 	case 4:
310 	case 3:
311 		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
312 		break;
313 	case 2:
314 		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
315 		break;
316 	case 1:
317 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
318 		break;
319 	}
320 
321 	if (params->flags & PLANE_HAS_FENCE) {
322 		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
323 		I915_WRITE(SNB_DPFC_CTL_SA,
324 			   SNB_CPU_FENCE_ENABLE |
325 			   params->vma->fence->id);
326 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
327 	} else {
328 		I915_WRITE(SNB_DPFC_CTL_SA,0);
329 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
330 	}
331 
332 	if (dev_priv->fbc.false_color)
333 		dpfc_ctl |= FBC_CTL_FALSE_COLOR;
334 
335 	if (IS_IVYBRIDGE(dev_priv)) {
336 		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
337 		I915_WRITE(ILK_DISPLAY_CHICKEN1,
338 			   I915_READ(ILK_DISPLAY_CHICKEN1) |
339 			   ILK_FBCQ_DIS);
340 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
341 		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
342 		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
343 			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
344 			   HSW_FBCQ_DIS);
345 	}
346 
347 	if (IS_GEN(dev_priv, 11))
348 		/* Wa_1409120013:icl,ehl */
349 		I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
350 
351 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
352 
353 	intel_fbc_recompress(dev_priv);
354 }
355 
356 static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
357 {
358 	if (INTEL_GEN(dev_priv) >= 5)
359 		return ilk_fbc_is_active(dev_priv);
360 	else if (IS_GM45(dev_priv))
361 		return g4x_fbc_is_active(dev_priv);
362 	else
363 		return i8xx_fbc_is_active(dev_priv);
364 }
365 
366 static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
367 {
368 	struct intel_fbc *fbc = &dev_priv->fbc;
369 
370 	fbc->active = true;
371 
372 	if (INTEL_GEN(dev_priv) >= 7)
373 		gen7_fbc_activate(dev_priv);
374 	else if (INTEL_GEN(dev_priv) >= 5)
375 		ilk_fbc_activate(dev_priv);
376 	else if (IS_GM45(dev_priv))
377 		g4x_fbc_activate(dev_priv);
378 	else
379 		i8xx_fbc_activate(dev_priv);
380 }
381 
382 static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
383 {
384 	struct intel_fbc *fbc = &dev_priv->fbc;
385 
386 	fbc->active = false;
387 
388 	if (INTEL_GEN(dev_priv) >= 5)
389 		ilk_fbc_deactivate(dev_priv);
390 	else if (IS_GM45(dev_priv))
391 		g4x_fbc_deactivate(dev_priv);
392 	else
393 		i8xx_fbc_deactivate(dev_priv);
394 }
395 
/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 *
 * Returns: the software-tracked FBC active flag (not a hardware read).
 *
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.active;
}
409 
/*
 * Deactivate FBC in hardware (if active) and record @reason for
 * debugging. Caller must hold fbc->lock.
 */
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
				 const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);

	/* Remember why FBC is off; unconditionally overwritten. */
	fbc->no_fbc_reason = reason;
}
422 
423 static bool multiple_pipes_ok(struct intel_crtc *crtc,
424 			      struct intel_plane_state *plane_state)
425 {
426 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
427 	struct intel_fbc *fbc = &dev_priv->fbc;
428 	enum pipe pipe = crtc->pipe;
429 
430 	/* Don't even bother tracking anything we don't need. */
431 	if (!no_fbc_on_multiple_pipes(dev_priv))
432 		return true;
433 
434 	if (plane_state->base.visible)
435 		fbc->visible_pipes_mask |= (1 << pipe);
436 	else
437 		fbc->visible_pipes_mask &= ~(1 << pipe);
438 
439 	return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
440 }
441 
/*
 * Reserve a stolen-memory node for the CFB, retrying with progressively
 * smaller sizes (higher compression thresholds) until an allocation
 * succeeds.
 *
 * Returns the threshold that worked (1 == full-size CFB, over-allocated
 * 2x), or 0 when no allocation fit.
 */
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation.
	 * Note the "size <<= 1" side effect: the doubled size is what the
	 * first halving below starts from. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	/* Halve the size ("size >>= 1") and retry. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		/* Pre-G4X has no compression-limit steps; give up. */
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}
490 
/*
 * Allocate the compressed framebuffer (CFB) from stolen memory, plus
 * the line-length buffer on pre-GM45 hardware, and program the base
 * registers. Returns 0 on success or -ENOSPC when stolen memory is
 * exhausted.
 */
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int size, fb_cpp, ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
	fb_cpp = fbc->state_cache.fb.format->cpp[0];

	/* ret is the compression threshold: 0 on failure, >1 means the
	 * CFB had to be shrunk. */
	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		/* Pre-GM45 also needs a line-length buffer in stolen memory. */
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		/* The base registers hold 32-bit offsets from dsm.start. */
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_fb.start,
					     U32_MAX));
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_llb->start,
					     U32_MAX));
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->dsm.start + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->dsm.start + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	/* kfree(NULL) is a no-op when the kzalloc above failed. */
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
555 
556 static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
557 {
558 	struct intel_fbc *fbc = &dev_priv->fbc;
559 
560 	if (drm_mm_node_allocated(&fbc->compressed_fb))
561 		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
562 
563 	if (fbc->compressed_llb) {
564 		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
565 		kfree(fbc->compressed_llb);
566 	}
567 }
568 
/**
 * intel_fbc_cleanup_cfb - free the FBC stolen-memory buffers
 * @dev_priv: i915 device instance
 *
 * Locked wrapper around __intel_fbc_cleanup_cfb().
 */
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}
580 
/* Check whether the framebuffer stride satisfies the FBC restrictions. */
static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* This should have been caught earlier. */
	if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
		return false;

	/* Below are the additional FBC restrictions. */
	if (stride < 512)
		return false;

	/* Gen2/3 only accept two fixed strides. */
	if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
		return stride == 4096 || stride == 8192;

	if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	if (stride > 16384)
		return false;

	return true;
}
603 
604 static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
605 				  u32 pixel_format)
606 {
607 	switch (pixel_format) {
608 	case DRM_FORMAT_XRGB8888:
609 	case DRM_FORMAT_XBGR8888:
610 		return true;
611 	case DRM_FORMAT_XRGB1555:
612 	case DRM_FORMAT_RGB565:
613 		/* 16bpp not supported on gen2 */
614 		if (IS_GEN(dev_priv, 2))
615 			return false;
616 		/* WaFbcOnly1to1Ratio:ctg */
617 		if (IS_G4X(dev_priv))
618 			return false;
619 		return true;
620 	default:
621 		return false;
622 	}
623 }
624 
625 /*
626  * For some reason, the hardware tracking starts looking at whatever we
627  * programmed as the display plane base address register. It does not look at
628  * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
629  * variables instead of just looking at the pipe/plane size.
630  */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	/* Maximum plane size the hardware tracker covers, per platform. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		max_w = 5120;
		max_h = 4096;
	} else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	/* Include the adjusted offsets the hardware actually tracks from. */
	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += fbc->state_cache.plane.adjusted_x;
	effective_h += fbc->state_cache.plane.adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}
658 
/*
 * Refresh fbc->state_cache from the new CRTC and plane state.
 * Everything intel_fbc_can_activate() and intel_fbc_get_reg_params()
 * later read must be captured here.
 */
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 struct intel_crtc_state *crtc_state,
					 struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->base.fb;

	/* vma/flags stay NULL/0 unless the plane is visible (see below). */
	cache->vma = NULL;
	cache->flags = 0;

	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->base.rotation;
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
	cache->plane.visible = plane_state->base.visible;
	cache->plane.adjusted_x = plane_state->color_plane[0].x;
	cache->plane.adjusted_y = plane_state->color_plane[0].y;
	cache->plane.y = plane_state->base.src.y1 >> 16;

	cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;

	/* Invisible plane: leave fb/vma fields untouched/cleared. */
	if (!cache->plane.visible)
		return;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];

	cache->vma = plane_state->vma;
	cache->flags = plane_state->flags;
	/* A fenced plane without a fence object would be inconsistent. */
	if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
		cache->flags &= ~PLANE_HAS_FENCE;
}
701 
/*
 * Run every per-CRTC check (against the state cache) that can prevent
 * FBC from being activated right now. On failure, fbc->no_fbc_reason
 * records the first blocking condition for debugfs.
 */
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* We don't need to use a state cache here since this information is
	 * global for all CRTC.
	 */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	/* A NULL vma means the primary plane isn't visible (see
	 * intel_fbc_update_state_cache()). */
	if (!cache->vma) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constaints
	 * at the time of pinning.
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
	 * For now this will effecively disable FBC with 90/270 degree
	 * rotation.
	 */
	if (!(cache->flags & PLANE_HAS_FENCE)) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}
	if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != DRM_MODE_ROTATE_0) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    cache->fb.format->has_alpha) {
		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
	    fbc->compressed_fb.size * fbc->threshold) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (IS_GEN_RANGE(dev_priv, 9, 10) &&
	    (fbc->state_cache.plane.adjusted_y & 3)) {
		fbc->no_fbc_reason = "plane Y offset is misaligned";
		return false;
	}

	return true;
}
806 
807 static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
808 {
809 	struct intel_fbc *fbc = &dev_priv->fbc;
810 
811 	if (intel_vgpu_active(dev_priv)) {
812 		fbc->no_fbc_reason = "VGPU is active";
813 		return false;
814 	}
815 
816 	if (!i915_modparams.enable_fbc) {
817 		fbc->no_fbc_reason = "disabled per module param or by default";
818 		return false;
819 	}
820 
821 	if (fbc->underrun_detected) {
822 		fbc->no_fbc_reason = "underrun detected";
823 		return false;
824 	}
825 
826 	return true;
827 }
828 
/*
 * Snapshot the state cache into @params — the exact values the
 * per-platform activate functions will program into the hardware.
 */
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->vma = cache->vma;
	params->flags = cache->flags;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);

	params->fb.format = cache->fb.format;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	/* CFB stride override used by Display WA #0529 in gen7_fbc_activate(). */
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
		params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
						32 * fbc->threshold) * 8;
}
857 
/**
 * intel_fbc_pre_update - deactivate FBC before a plane update
 * @crtc: the CRTC being updated
 * @crtc_state: the new CRTC state
 * @plane_state: the new primary plane state
 *
 * Deactivates FBC ahead of the atomic update and, when FBC is enabled
 * on @crtc, refreshes the state cache and marks a flip as pending so
 * intel_fbc_post_update() can reactivate it afterwards.
 */
void intel_fbc_pre_update(struct intel_crtc *crtc,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	const char *reason = "update pending";

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	if (!multiple_pipes_ok(crtc, plane_state)) {
		reason = "more than one pipe active";
		goto deactivate;
	}

	/* Not our CRTC (or FBC disabled): nothing to cache or deactivate. */
	if (!fbc->enabled || fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
	fbc->flip_pending = true;

deactivate:
	intel_fbc_deactivate(dev_priv, reason);
unlock:
	mutex_unlock(&fbc->lock);
}
887 
/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock. FBC must already be deactivated (fbc->active false)
 * and enabled on some CRTC.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->enabled);
	WARN_ON(fbc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	/* Release the stolen-memory CFB (and llb, if any). */
	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->enabled = false;
	fbc->crtc = NULL;
}
911 
/*
 * Finish an update on the FBC CRTC: consume the pending flip and, when
 * the new state allows it, reactivate the hardware. Caller must hold
 * fbc->lock.
 */
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	/* Nothing to do unless FBC is enabled on this very CRTC. */
	if (!fbc->enabled || fbc->crtc != crtc)
		return;

	fbc->flip_pending = false;
	WARN_ON(fbc->active);

	/* The module param may have been cleared at runtime. */
	if (!i915_modparams.enable_fbc) {
		intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
		__intel_fbc_disable(dev_priv);

		return;
	}

	intel_fbc_get_reg_params(crtc, &fbc->params);

	if (!intel_fbc_can_activate(crtc))
		return;

	if (!fbc->busy_bits) {
		intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
		intel_fbc_hw_activate(dev_priv);
	} else
		intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
943 
/**
 * intel_fbc_post_update - reactivate FBC after a plane update
 * @crtc: the CRTC that was updated
 *
 * Locked wrapper around __intel_fbc_post_update().
 */
void intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_post_update(crtc);
	mutex_unlock(&fbc->lock);
}
956 
957 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
958 {
959 	if (fbc->enabled)
960 		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
961 	else
962 		return fbc->possible_framebuffer_bits;
963 }
964 
/**
 * intel_fbc_invalidate - frontbuffer invalidate hook
 * @dev_priv: i915 device instance
 * @frontbuffer_bits: bits of the planes being written
 * @origin: source of the write
 *
 * Records CPU writes touching the FBC frontbuffer in busy_bits and
 * deactivates compression until the matching flush. GTT and flip
 * origins are ignored here.
 */
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	/* Only remember bits FBC actually cares about. */
	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->enabled && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv, "frontbuffer write");

	mutex_unlock(&fbc->lock);
}
986 
/**
 * intel_fbc_flush - frontbuffer flush hook
 * @dev_priv: i915 device instance
 * @frontbuffer_bits: bits of the planes that were flushed
 * @origin: source of the flush
 *
 * Clears the flushed bits from busy_bits. When the FBC frontbuffer has
 * become idle again, either forces a recompression (if FBC is active)
 * or tries to reactivate it via the post_update path.
 */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	/* GTT/flip flushes still clear busy_bits but trigger no action. */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		goto out;

	if (!fbc->busy_bits && fbc->enabled &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else if (!fbc->flip_pending)
			__intel_fbc_post_update(fbc->crtc);
	}

out:
	mutex_unlock(&fbc->lock);
}
1013 
1014 /**
1015  * intel_fbc_choose_crtc - select a CRTC to enable FBC on
1016  * @dev_priv: i915 device instance
1017  * @state: the atomic state structure
1018  *
1019  * This function looks at the proposed state for CRTCs and planes, then chooses
1020  * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
1021  * true.
1022  *
1023  * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
1024  * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
1025  */
1026 void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
1027 			   struct intel_atomic_state *state)
1028 {
1029 	struct intel_fbc *fbc = &dev_priv->fbc;
1030 	struct intel_plane *plane;
1031 	struct intel_plane_state *plane_state;
1032 	bool crtc_chosen = false;
1033 	int i;
1034 
1035 	mutex_lock(&fbc->lock);
1036 
1037 	/* Does this atomic commit involve the CRTC currently tied to FBC? */
1038 	if (fbc->crtc &&
1039 	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
1040 		goto out;
1041 
1042 	if (!intel_fbc_can_enable(dev_priv))
1043 		goto out;
1044 
1045 	/* Simply choose the first CRTC that is compatible and has a visible
1046 	 * plane. We could go for fancier schemes such as checking the plane
1047 	 * size, but this would just affect the few platforms that don't tie FBC
1048 	 * to pipe or plane A. */
1049 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1050 		struct intel_crtc_state *crtc_state;
1051 		struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
1052 
1053 		if (!plane->has_fbc)
1054 			continue;
1055 
1056 		if (!plane_state->base.visible)
1057 			continue;
1058 
1059 		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1060 
1061 		crtc_state->enable_fbc = true;
1062 		crtc_chosen = true;
1063 		break;
1064 	}
1065 
1066 	if (!crtc_chosen)
1067 		fbc->no_fbc_reason = "no suitable CRTC for FBC";
1068 
1069 out:
1070 	mutex_unlock(&fbc->lock);
1071 }
1072 
1073 /**
1074  * intel_fbc_enable: tries to enable FBC on the CRTC
1075  * @crtc: the CRTC
1076  * @crtc_state: corresponding &drm_crtc_state for @crtc
1077  * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
1078  *
1079  * This function checks if the given CRTC was chosen for FBC, then enables it if
1080  * possible. Notice that it doesn't activate FBC. It is valid to call
1081  * intel_fbc_enable multiple times for the same pipe without an
1082  * intel_fbc_disable in the middle, as long as it is deactivated.
1083  */
1084 void intel_fbc_enable(struct intel_crtc *crtc,
1085 		      struct intel_crtc_state *crtc_state,
1086 		      struct intel_plane_state *plane_state)
1087 {
1088 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1089 	struct intel_fbc *fbc = &dev_priv->fbc;
1090 
1091 	if (!fbc_supported(dev_priv))
1092 		return;
1093 
1094 	mutex_lock(&fbc->lock);
1095 
1096 	if (fbc->enabled) {
1097 		WARN_ON(fbc->crtc == NULL);
1098 		if (fbc->crtc == crtc) {
1099 			WARN_ON(!crtc_state->enable_fbc);
1100 			WARN_ON(fbc->active);
1101 		}
1102 		goto out;
1103 	}
1104 
1105 	if (!crtc_state->enable_fbc)
1106 		goto out;
1107 
1108 	WARN_ON(fbc->active);
1109 	WARN_ON(fbc->crtc != NULL);
1110 
1111 	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
1112 	if (intel_fbc_alloc_cfb(crtc)) {
1113 		fbc->no_fbc_reason = "not enough stolen memory";
1114 		goto out;
1115 	}
1116 
1117 	DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
1118 	fbc->no_fbc_reason = "FBC enabled but not active yet\n";
1119 
1120 	fbc->enabled = true;
1121 	fbc->crtc = crtc;
1122 out:
1123 	mutex_unlock(&fbc->lock);
1124 }
1125 
1126 /**
1127  * intel_fbc_disable - disable FBC if it's associated with crtc
1128  * @crtc: the CRTC
1129  *
1130  * This function disables FBC if it's associated with the provided CRTC.
1131  */
1132 void intel_fbc_disable(struct intel_crtc *crtc)
1133 {
1134 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1135 	struct intel_fbc *fbc = &dev_priv->fbc;
1136 
1137 	if (!fbc_supported(dev_priv))
1138 		return;
1139 
1140 	mutex_lock(&fbc->lock);
1141 	if (fbc->crtc == crtc)
1142 		__intel_fbc_disable(dev_priv);
1143 	mutex_unlock(&fbc->lock);
1144 }
1145 
1146 /**
1147  * intel_fbc_global_disable - globally disable FBC
1148  * @dev_priv: i915 device instance
1149  *
1150  * This function disables FBC regardless of which CRTC is associated with it.
1151  */
1152 void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
1153 {
1154 	struct intel_fbc *fbc = &dev_priv->fbc;
1155 
1156 	if (!fbc_supported(dev_priv))
1157 		return;
1158 
1159 	mutex_lock(&fbc->lock);
1160 	if (fbc->enabled) {
1161 		WARN_ON(fbc->crtc->active);
1162 		__intel_fbc_disable(dev_priv);
1163 	}
1164 	mutex_unlock(&fbc->lock);
1165 }
1166 
1167 static void intel_fbc_underrun_work_fn(struct work_struct *work)
1168 {
1169 	struct drm_i915_private *dev_priv =
1170 		container_of(work, struct drm_i915_private, fbc.underrun_work);
1171 	struct intel_fbc *fbc = &dev_priv->fbc;
1172 
1173 	mutex_lock(&fbc->lock);
1174 
1175 	/* Maybe we were scheduled twice. */
1176 	if (fbc->underrun_detected || !fbc->enabled)
1177 		goto out;
1178 
1179 	DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
1180 	fbc->underrun_detected = true;
1181 
1182 	intel_fbc_deactivate(dev_priv, "FIFO underrun");
1183 out:
1184 	mutex_unlock(&fbc->lock);
1185 }
1186 
1187 /*
1188  * intel_fbc_reset_underrun - reset FBC fifo underrun status.
1189  * @dev_priv: i915 device instance
1190  *
1191  * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
1192  * want to re-enable FBC after an underrun to increase test coverage.
1193  */
1194 int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
1195 {
1196 	int ret;
1197 
1198 	cancel_work_sync(&dev_priv->fbc.underrun_work);
1199 
1200 	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
1201 	if (ret)
1202 		return ret;
1203 
1204 	if (dev_priv->fbc.underrun_detected) {
1205 		DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
1206 		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
1207 	}
1208 
1209 	dev_priv->fbc.underrun_detected = false;
1210 	mutex_unlock(&dev_priv->fbc.lock);
1211 
1212 	return 0;
1213 }
1214 
1215 /**
1216  * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
1217  * @dev_priv: i915 device instance
1218  *
1219  * Without FBC, most underruns are harmless and don't really cause too many
1220  * problems, except for an annoying message on dmesg. With FBC, underruns can
1221  * become black screens or even worse, especially when paired with bad
1222  * watermarks. So in order for us to be on the safe side, completely disable FBC
1223  * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
1224  * already suggests that watermarks may be bad, so try to be as safe as
1225  * possible.
1226  *
1227  * This function is called from the IRQ handler.
1228  */
1229 void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
1230 {
1231 	struct intel_fbc *fbc = &dev_priv->fbc;
1232 
1233 	if (!fbc_supported(dev_priv))
1234 		return;
1235 
1236 	/* There's no guarantee that underrun_detected won't be set to true
1237 	 * right after this check and before the work is scheduled, but that's
1238 	 * not a problem since we'll check it again under the work function
1239 	 * while FBC is locked. This check here is just to prevent us from
1240 	 * unnecessarily scheduling the work, and it relies on the fact that we
1241 	 * never switch underrun_detect back to false after it's true. */
1242 	if (READ_ONCE(fbc->underrun_detected))
1243 		return;
1244 
1245 	schedule_work(&fbc->underrun_work);
1246 }
1247 
1248 /**
1249  * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
1250  * @dev_priv: i915 device instance
1251  *
1252  * The FBC code needs to track CRTC visibility since the older platforms can't
1253  * have FBC enabled while multiple pipes are used. This function does the
1254  * initial setup at driver load to make sure FBC is matching the real hardware.
1255  */
1256 void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
1257 {
1258 	struct intel_crtc *crtc;
1259 
1260 	/* Don't even bother tracking anything if we don't need. */
1261 	if (!no_fbc_on_multiple_pipes(dev_priv))
1262 		return;
1263 
1264 	for_each_intel_crtc(&dev_priv->drm, crtc)
1265 		if (intel_crtc_active(crtc) &&
1266 		    crtc->base.primary->state->visible)
1267 			dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
1268 }
1269 
1270 /*
1271  * The DDX driver changes its behavior depending on the value it reads from
1272  * i915.enable_fbc, so sanitize it by translating the default value into either
1273  * 0 or 1 in order to allow it to know what's going on.
1274  *
1275  * Notice that this is done at driver initialization and we still allow user
1276  * space to change the value during runtime without sanitizing it again. IGT
1277  * relies on being able to change i915.enable_fbc at runtime.
1278  */
/* Resolve the enable_fbc modparam default (negative) into an explicit 0/1
 * policy for this platform; explicit user values pass through unchanged. */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
	/* Respect an explicit user choice (0 = off, >0 = on). */
	if (i915_modparams.enable_fbc >= 0)
		return !!i915_modparams.enable_fbc;

	if (!HAS_FBC(dev_priv))
		return 0;

	/* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
	/* NOTE: Geminilake is a gen9 platform, so this check must stay
	 * ahead of the INTEL_GEN() >= 9 default-enable below. */
	if (IS_GEMINILAKE(dev_priv))
		return 0;

	/* Default-enable on Broadwell and gen9+. */
	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
		return 1;

	/* Everything else defaults to off. */
	return 0;
}
1296 
1297 static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
1298 {
1299 	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1300 	if (intel_vtd_active() &&
1301 	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
1302 		DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1303 		return true;
1304 	}
1305 
1306 	return false;
1307 }
1308 
1309 /**
1310  * intel_fbc_init - Initialize FBC
1311  * @dev_priv: the i915 device
1312  *
1313  * This function might be called during PM init process.
1314  */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	/* Software state: underrun work item, lock, and disabled/inactive
	 * starting point. */
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);
	fbc->enabled = false;
	fbc->active = false;

	/* Apply the VT-d workaround before sanitizing the modparam: the
	 * sanitize step consults HAS_FBC(), which this write presumably
	 * feeds (display.has_fbc) — keep this ordering. */
	if (need_fbc_vtd_wa(dev_priv))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
		      i915_modparams.enable_fbc);

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* This value was pulled out of someone's hat */
	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}
1346