/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. It comes from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and configuring its
 * offset in the proper registers. The hardware takes care of all
 * compress/decompress. However there are many known cases where we have to
 * forcibly disable it to allow proper screen updates.
 */

#include <drm/drm_fourcc.h>

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"

/* Single feature gate: FBC is only usable when the platform advertises it. */
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	return HAS_FBC(dev_priv);
}

/*
 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
 * origin so the x and y offsets can actually fit the registers. As a
 * consequence, the fence doesn't really start exactly at the display plane
 * address we program because it starts at the real start of the buffer, so we
 * have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
{
	return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
}

/*
 * For SKL+, the plane source size used by the hardware is based on the value we
 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
 * we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
					    int *width, int *height)
{
	/* Either output pointer may be NULL when the caller needs only one. */
	if (width)
		*width = cache->plane.src_w;
	if (height)
		*height = cache->plane.src_h;
}

/* Bytes of compressed framebuffer (CFB) needed for the cached plane state. */
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
					const struct intel_fbc_state_cache *cache)
{
	int lines;

	intel_fbc_get_plane_source_size(cache, NULL, &lines);
	/* Per-generation cap on how many lines the hardware can track. */
	if (IS_GEN(dev_priv, 7))
		lines = min(lines, 2048);
	else if (INTEL_GEN(dev_priv) >= 8)
		lines = min(lines, 2560);

	/* Hardware needs the full buffer stride, not just the active area. */
	return lines * cache->fb.stride;
}

/* Turn off i8xx-style FBC and wait for the compressor to go idle. */
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}

/* Program and enable FBC on gen2-gen4 (pre-GM45) hardware. */
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN(dev_priv, 2))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN(dev_priv, 4)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
		if (params->fence_id >= 0)
			fbc_ctl2 |= FBC_CTL_CPU_FENCE;
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	if (params->fence_id >= 0)
		fbc_ctl |= params->fence_id;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}

static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

/* Program and enable DPFC-style FBC on G4X/GM45. */
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
	/* 16bpp formats use a tighter compression limit. */
	if (params->fb.format->cpp[0] == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;

	if (params->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(DPFC_FENCE_YOFF, 0);
	}

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	}
}

static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

/* This function forces a CFB recompression through the nuke operation.
 */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	POSTING_READ(MSG_FBC_REND_STATE);
}

/* Program and enable FBC on ILK/SNB. */
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
	/* 16bpp needs the next higher compression threshold. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		/* ILK carries the fence in the control register; SNB uses
		 * the dedicated SNB_DPFC_CTL_SA register instead. */
		if (IS_GEN(dev_priv, 5))
			dpfc_ctl |= params->fence_id;
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA,
				   SNB_CPU_FENCE_ENABLE |
				   params->fence_id);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
				   params->crtc.fence_y_offset);
		}
	} else {
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA, 0);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
	}
}

static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

/* Program and enable FBC on IVB and newer platforms. */
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	/* Display WA #0529: skl, kbl, bxt. */
	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
		u32 val = I915_READ(CHICKEN_MISC_4);

		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

		if (params->gen9_wa_cfb_stride)
			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;

		I915_WRITE(CHICKEN_MISC_4, val);
	}

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);

	/* 16bpp needs the next higher compression threshold. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->fence_id >= 0) {
		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE |
			   params->fence_id);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(SNB_DPFC_CTL_SA, 0);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
	}

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		/* Wa_1409120013:icl,ehl,tgl */
		I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

/* Query the hardware enable bit via the matching per-platform helper. */
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 5)
		return ilk_fbc_is_active(dev_priv);
	else if (IS_GM45(dev_priv))
		return g4x_fbc_is_active(dev_priv);
	else
		return i8xx_fbc_is_active(dev_priv);
}

/* Update software state and dispatch to the per-platform activation. */
static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = true;
	fbc->activated = true;

	if (INTEL_GEN(dev_priv) >= 7)
		gen7_fbc_activate(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_activate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_activate(dev_priv);
	else
		i8xx_fbc_activate(dev_priv);
}

/* Update software state and dispatch to the per-platform deactivation. */
static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = false;

	if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_deactivate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_deactivate(dev_priv);
	else
		i8xx_fbc_deactivate(dev_priv);
}

/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 *
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.active;
}

/* Deactivate FBC (if active) and record why, for debugfs reporting. */
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
				 const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);

	fbc->no_fbc_reason = reason;
}

/*
 * Try to reserve a stolen-memory node for the CFB, retrying with
 * successively higher compression thresholds (i.e. smaller CFBs) until one
 * fits. Returns the threshold that worked, or 0 on failure. Note that the
 * insert calls deliberately shift @size in-place via <<=/>>=.
 */
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      unsigned int size,
				      unsigned int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

/*
 * Reserve stolen memory for the CFB and, on pre-GM45 hardware, the line
 * length (LL) buffer, then program the base registers. Returns 0 on
 * success or -ENOSPC when stolen memory is exhausted.
 */
static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
			       unsigned int size, unsigned int fb_cpp)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		/* The old registers are 32 bit wide; the nodes must fit. */
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_fb.start,
					     U32_MAX));
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_llb->start,
					     U32_MAX));
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->dsm.start + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->dsm.start + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

/* Release the CFB and LL buffer nodes back to stolen memory. */
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (drm_mm_node_allocated(&fbc->compressed_fb))
		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);

	if (fbc->compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
		kfree(fbc->compressed_llb);
	}
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}

/* Check the framebuffer stride against per-generation FBC limits. */
static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* This should have been caught earlier. */
	if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
		return false;

	/* Below are the additional FBC restrictions. */
	if (stride < 512)
		return false;

	if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
		return stride == 4096 || stride == 8192;

	if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

/* Check whether the framebuffer pixel format can be compressed. */
static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
				  u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN(dev_priv, 2))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register.
 * It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	/* Maximum plane size the FBC hardware can track, per generation. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		max_w = 5120;
		max_h = 4096;
	} else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += fbc->state_cache.plane.adjusted_x;
	effective_h += fbc->state_cache.plane.adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}

/* Snapshot everything FBC needs from the new plane/CRTC atomic state. */
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->hw.fb;

	cache->plane.visible = plane_state->uapi.visible;
	/* The rest of the cache is only meaningful for a visible plane. */
	if (!cache->plane.visible)
		return;

	cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->hw.rotation;
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
	cache->plane.adjusted_x = plane_state->color_plane[0].x;
	cache->plane.adjusted_y = plane_state->color_plane[0].y;
	cache->plane.y = plane_state->uapi.src.y1 >> 16;

	cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];

	WARN_ON(plane_state->flags & PLANE_HAS_FENCE &&
		!plane_state->vma->fence);

	if (plane_state->flags & PLANE_HAS_FENCE &&
	    plane_state->vma->fence)
		cache->fence_id = plane_state->vma->fence->id;
	else
		cache->fence_id = -1;
}

/* Has the required CFB grown beyond what we currently have allocated? */
static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
		fbc->compressed_fb.size * fbc->threshold;
}

/*
 * Check every constraint needed to actually turn FBC on for the given
 * CRTC; on failure, records the first failing reason for debugfs.
 */
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	if (!cache->plane.visible) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	/* We don't need to use a state cache here since this information is
	 * global for all CRTC.
	 */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constaints
	 * at the time of pinning.
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
	 * For now this will effecively disable FBC with 90/270 degree
	 * rotation.
	 */
	if (cache->fence_id < 0) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}
	if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != DRM_MODE_ROTATE_0) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    cache->fb.format->has_alpha) {
		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_cfb_size_changed(dev_priv)) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (INTEL_GEN(dev_priv) >= 9 &&
	    (fbc->state_cache.plane.adjusted_y & 3)) {
		fbc->no_fbc_reason = "plane Y offset is misaligned";
		return false;
	}

	return true;
}

/* Global (not per-CRTC) checks gating any FBC use at all. */
static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (!i915_modparams.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	return true;
}

/* Derive the register parameters used at activation time from the cache. */
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->fence_id = cache->fence_id;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);

	params->fb.format = cache->fb.format;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride;

	params->plane_visible = cache->plane.visible;
}

/* Can this flip keep FBC active (just nuke the CFB) instead of disabling? */
static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_fbc *fbc = &dev_priv->fbc;
	const struct intel_fbc_state_cache *cache = &fbc->state_cache;
	const struct intel_fbc_reg_params *params = &fbc->params;

	if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
		return false;

	if (!params->plane_visible)
		return false;

	if (!intel_fbc_can_activate(crtc))
		return false;

	if (params->fb.format != cache->fb.format)
		return false;

	if (params->fb.stride != cache->fb.stride)
		return false;

	if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache))
		return false;

	if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride)
		return false;

	return true;
}

/*
 * Prepare FBC for a plane update: refresh the state cache and deactivate
 * FBC when the upcoming flip can't be handled with a simple nuke.
 * Returns true when the caller must wait for a vblank before touching the
 * plane registers (Display WA #1198).
 */
bool intel_fbc_pre_update(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	const char *reason = "update pending";
	bool need_vblank_wait = false;

	if (!fbc_supported(dev_priv))
		return need_vblank_wait;

	mutex_lock(&fbc->lock);

	if (fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
	fbc->flip_pending = true;

	if (!intel_fbc_can_flip_nuke(crtc_state)) {
		intel_fbc_deactivate(dev_priv, reason);

		/*
		 * Display WA #1198: glk+
		 * Need an extra vblank wait between FBC disable and most plane
		 * updates. Bspec says this is only needed for plane disable, but
		 * that is not true. Touching most plane registers will cause the
		 * corruption to appear. Also SKL/derivatives do not seem to be
		 * affected.
		 *
		 * TODO: could optimize this a bit by sampling the frame
		 * counter when we disable FBC (if it was already done earlier)
		 * and skipping the extra vblank wait before the plane update
		 * if at least one frame has already passed.
		 */
		if (fbc->activated &&
		    (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
			need_vblank_wait = true;
		fbc->activated = false;
	}
unlock:
	mutex_unlock(&fbc->lock);

	return need_vblank_wait;
}

/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->crtc);
	WARN_ON(fbc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->crtc = NULL;
}

/* Finish an update: reprogram and possibly reactivate FBC on the CRTC. */
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (fbc->crtc != crtc)
		return;

	fbc->flip_pending = false;

	/* The module param may have been flipped at runtime. */
	if (!i915_modparams.enable_fbc) {
		intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
		__intel_fbc_disable(dev_priv);

		return;
	}

	intel_fbc_get_reg_params(crtc, &fbc->params);

	if (!intel_fbc_can_activate(crtc))
		return;

	if (!fbc->busy_bits)
		intel_fbc_hw_activate(dev_priv);
	else
		intel_fbc_deactivate(dev_priv, "frontbuffer write");
}

void intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_post_update(crtc);
	mutex_unlock(&fbc->lock);
}

/* Frontbuffer bits FBC cares about: the bound plane's, or all candidates. */
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
	if (fbc->crtc)
		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
	else
		return fbc->possible_framebuffer_bits;
}

/* Deactivate FBC when the CPU dirties the tracked frontbuffer. */
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* GTT and flip writes are tracked by the hardware itself. */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->crtc && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv, "frontbuffer write");

	mutex_unlock(&fbc->lock);
}

/* Frontbuffer flush: clear busy bits, then recompress or reactivate. */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		goto out;

	if (!fbc->busy_bits && fbc->crtc &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else if (!fbc->flip_pending)
			__intel_fbc_post_update(fbc->crtc);
	}

out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct intel_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	bool crtc_chosen = false;
	int i;

	mutex_lock(&fbc->lock);

	/* Does this atomic commit involve the CRTC currently tied to FBC? */
	if (fbc->crtc &&
	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
		goto out;

	if (!intel_fbc_can_enable(dev_priv))
		goto out;

	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie FBC
	 * to pipe or plane A. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);

		if (!plane->has_fbc)
			continue;

		if (!plane_state->uapi.visible)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->enable_fbc = true;
		crtc_chosen = true;
		break;
	}

	if (!crtc_chosen)
		fbc->no_fbc_reason = "no suitable CRTC for FBC";

out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_enable: tries to enable FBC on the CRTC
 * @crtc: the CRTC
 * @crtc_state: corresponding &drm_crtc_state for @crtc
 * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
 *
 * This function checks if the given CRTC was chosen for FBC, then enables it if
 * possible. Notice that it doesn't activate FBC. It is valid to call
 * intel_fbc_enable multiple times for the same pipe without an
 * intel_fbc_disable in the middle, as long as it is deactivated.
1109 */ 1110 void intel_fbc_enable(struct intel_crtc *crtc, 1111 const struct intel_crtc_state *crtc_state, 1112 const struct intel_plane_state *plane_state) 1113 { 1114 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1115 struct intel_fbc *fbc = &dev_priv->fbc; 1116 struct intel_fbc_state_cache *cache = &fbc->state_cache; 1117 const struct drm_framebuffer *fb = plane_state->hw.fb; 1118 1119 if (!fbc_supported(dev_priv)) 1120 return; 1121 1122 mutex_lock(&fbc->lock); 1123 1124 if (fbc->crtc) { 1125 if (fbc->crtc != crtc || 1126 !intel_fbc_cfb_size_changed(dev_priv)) 1127 goto out; 1128 1129 __intel_fbc_disable(dev_priv); 1130 } 1131 1132 WARN_ON(fbc->active); 1133 1134 intel_fbc_update_state_cache(crtc, crtc_state, plane_state); 1135 1136 /* FIXME crtc_state->enable_fbc lies :( */ 1137 if (!cache->plane.visible) 1138 goto out; 1139 1140 if (intel_fbc_alloc_cfb(dev_priv, 1141 intel_fbc_calculate_cfb_size(dev_priv, cache), 1142 fb->format->cpp[0])) { 1143 cache->plane.visible = false; 1144 fbc->no_fbc_reason = "not enough stolen memory"; 1145 goto out; 1146 } 1147 1148 if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && 1149 fb->modifier != I915_FORMAT_MOD_X_TILED) 1150 cache->gen9_wa_cfb_stride = 1151 DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; 1152 else 1153 cache->gen9_wa_cfb_stride = 0; 1154 1155 DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); 1156 fbc->no_fbc_reason = "FBC enabled but not active yet\n"; 1157 1158 fbc->crtc = crtc; 1159 out: 1160 mutex_unlock(&fbc->lock); 1161 } 1162 1163 /** 1164 * intel_fbc_disable - disable FBC if it's associated with crtc 1165 * @crtc: the CRTC 1166 * 1167 * This function disables FBC if it's associated with the provided CRTC. 
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	/* Only tear down if FBC is actually tied to this CRTC. */
	if (fbc->crtc == crtc)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc) {
		WARN_ON(fbc->crtc->active);
		__intel_fbc_disable(dev_priv);
	}
	mutex_unlock(&fbc->lock);
}

/*
 * Deferred work scheduled from the FIFO underrun IRQ path: it takes
 * fbc->lock (a sleeping mutex, not usable in interrupt context) and
 * deactivates FBC, latching underrun_detected.
 */
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, fbc.underrun_work);
	struct intel_fbc *fbc = &dev_priv->fbc;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
	if (fbc->underrun_detected || !fbc->crtc)
		goto out;

	DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

	intel_fbc_deactivate(dev_priv, "FIFO underrun");
out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @dev_priv: i915 device instance
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Make sure a pending underrun work can't re-set the flag below. */
	cancel_work_sync(&dev_priv->fbc.underrun_work);

	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
	if (ret)
		return ret;

	if (dev_priv->fbc.underrun_detected) {
		DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
	}

	dev_priv->fbc.underrun_detected = false;
	mutex_unlock(&dev_priv->fbc.lock);

	return 0;
}

/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
 * @dev_priv: i915 device instance
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable FBC
 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
 * already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detect back to false after it's true. */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	schedule_work(&fbc->underrun_work);
}

/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
	/* User gave an explicit value (0 or positive): normalize to 0/1. */
	if (i915_modparams.enable_fbc >= 0)
		return !!i915_modparams.enable_fbc;

	if (!HAS_FBC(dev_priv))
		return 0;

	/* Default-on only where FBC is known to be solid. */
	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
		return 1;

	return 0;
}

static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
{
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
	if (intel_vtd_active() &&
	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
		DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
		return true;
	}

	return false;
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
1325 */ 1326 void intel_fbc_init(struct drm_i915_private *dev_priv) 1327 { 1328 struct intel_fbc *fbc = &dev_priv->fbc; 1329 1330 INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); 1331 mutex_init(&fbc->lock); 1332 fbc->active = false; 1333 1334 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 1335 mkwrite_device_info(dev_priv)->display.has_fbc = false; 1336 1337 if (need_fbc_vtd_wa(dev_priv)) 1338 mkwrite_device_info(dev_priv)->display.has_fbc = false; 1339 1340 i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); 1341 DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", 1342 i915_modparams.enable_fbc); 1343 1344 if (!HAS_FBC(dev_priv)) { 1345 fbc->no_fbc_reason = "unsupported by this chipset"; 1346 return; 1347 } 1348 1349 /* This value was pulled out of someone's hat */ 1350 if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv)) 1351 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); 1352 1353 /* We still don't have any sort of hardware state readout for FBC, so 1354 * deactivate it in case the BIOS activated it to make sure software 1355 * matches the hardware state. */ 1356 if (intel_fbc_hw_is_active(dev_priv)) 1357 intel_fbc_hw_deactivate(dev_priv); 1358 } 1359