// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>

#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_pipe_crc.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_vrr.h"
#include "i9xx_plane.h"
#include "skl_universal_plane.h"

/* Warn if vblank interrupts are still enabled on this crtc. */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];

	if (!crtc->active)
		return 0;

	if (!vblank->max_vblank_count)
		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);

	return crtc->base.funcs->get_vblank_counter(&crtc->base);
}

u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/*
	 * From Gen 11, in case of DSI cmd mode, the frame counter won't
	 * have been updated at the beginning of TE. If we want to use
	 * the hw counter, we would only find it updated at the next TE,
	 * hence switch to the sw counter.
	 */
	if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 |
				      I915_MODE_FLAG_DSI_USE_TE1))
		return 0;

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (DISPLAY_VER(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}

void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);

	/*
	 * Should really happen exactly when we enable the pipe
	 * but we want the frame counters in the trace, and that
	 * requires vblank support on some platforms/outputs.
	 */
	trace_intel_pipe_enable(crtc);
}

void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/*
	 * Should really happen exactly when we disable the pipe
	 * but we want the frame counters in the trace, and that
	 * requires vblank support on some platforms/outputs.
	 */
	trace_intel_pipe_disable(crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}

struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state;

	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);

	if (crtc_state)
		intel_crtc_state_reset(crtc_state, crtc);

	return crtc_state;
}

void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
			    struct intel_crtc *crtc)
{
	memset(crtc_state, 0, sizeof(*crtc_state));

	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);

	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
	crtc_state->master_transcoder = INVALID_TRANSCODER;
	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
	crtc_state->scaler_state.scaler_id = -1;
	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
}

static struct intel_crtc *intel_crtc_alloc(void)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;

	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
	if (!crtc)
		return ERR_PTR(-ENOMEM);

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(crtc);
		return ERR_PTR(-ENOMEM);
	}

	crtc->base.state = &crtc_state->uapi;
	crtc->config = crtc_state;

	return crtc;
}

static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}

static void intel_crtc_destroy(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);

	drm_crtc_cleanup(&crtc->base);
	kfree(crtc);
}

static int intel_crtc_late_register(struct drm_crtc *crtc)
{
	intel_crtc_debugfs_add(crtc);
	return 0;
}

#define INTEL_CRTC_FUNCS \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources, \
	.late_register = intel_crtc_late_register

static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	if (DISPLAY_VER(dev_priv) >= 9)
		primary = skl_universal_plane_create(dev_priv, pipe,
						     PLANE_PRIMARY);
	else
		primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		if (DISPLAY_VER(dev_priv) >= 9)
			plane = skl_universal_plane_create(dev_priv, pipe,
							   PLANE_SPRITE0 + sprite);
		else
			plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (DISPLAY_VER(dev_priv) == 4)
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (DISPLAY_VER(dev_priv) == 3)
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (DISPLAY_VER(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	if (DISPLAY_VER(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	if (DISPLAY_VER(dev_priv) >= 11)
		drm_crtc_create_scaling_filter_property(&crtc->base,
						BIT(DRM_SCALING_FILTER_DEFAULT) |
						BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));

	intel_color_init(crtc);

	intel_crtc_crc_init(crtc);

	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}

/* Convert a duration in microseconds to scanlines of the given adjusted mode. */
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
			     int usecs)
{
	/* paranoia */
	if (!adjusted_mode->crtc_htotal)
		return 1;

	return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
			    1000 * adjusted_mode->crtc_htotal);
}

static int intel_mode_vblank_start(const struct drm_display_mode *mode)
{
	int vblank_start = mode->crtc_vblank_start;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vblank_start = DIV_ROUND_UP(vblank_start, 2);

	return vblank_start;
}

/**
 * intel_pipe_update_start() - start update of a set of display registers
 * @new_crtc_state: the new crtc state
 *
 * Mark the start of an update to pipe registers that should be updated
 * atomically regarding vblank. If the next vblank will happen within
 * the next 100 us, this function waits until the vblank passes.
 *
 * After a successful call to this function, interrupts will be disabled
 * until a subsequent call to intel_pipe_update_end(). That is done to
 * avoid random delays.
 */
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
	long timeout = msecs_to_jiffies_timeout(1);
	int scanline, min, max, vblank_start;
	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
	bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
	DEFINE_WAIT(wait);

	if (new_crtc_state->uapi.async_flip)
		return;

	if (new_crtc_state->vrr.enable)
		vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
	else
		vblank_start = intel_mode_vblank_start(adjusted_mode);

	/* FIXME needs to be calibrated sensibly */
	min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
						      VBLANK_EVASION_TIME_US);
	max = vblank_start - 1;

	if (min <= 0 || max <= 0)
		goto irq_disable;

	if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
		goto irq_disable;

	/*
	 * Wait for PSR to idle out after enabling the VBL interrupts.
	 * VBL interrupts will start the PSR exit and prevent a PSR
	 * re-entry as well.
	 */
	intel_psr_wait_for_idle(new_crtc_state);

	local_irq_disable();

	crtc->debug.min_vbl = min;
	crtc->debug.max_vbl = max;
	trace_intel_pipe_update_start(crtc);

	for (;;) {
		/*
		 * prepare_to_wait() has a memory barrier, which guarantees
		 * other CPUs can see the task state update by the time we
		 * read the scanline.
		 */
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);

		scanline = intel_get_crtc_scanline(crtc);
		if (scanline < min || scanline > max)
			break;

		if (!timeout) {
			drm_err(&dev_priv->drm,
				"Potential atomic update failure on pipe %c\n",
				pipe_name(crtc->pipe));
			break;
		}

		local_irq_enable();

		timeout = schedule_timeout(timeout);

		local_irq_disable();
	}

	finish_wait(wq, &wait);

	drm_crtc_vblank_put(&crtc->base);

	/*
	 * On VLV/CHV DSI the scanline counter would appear to
	 * increment approx. 1/3 of a scanline before start of vblank.
	 * The registers still get latched at start of vblank however.
	 * This means we must not write any registers on the first
	 * line of vblank (since not the whole line is actually in
	 * vblank). And unfortunately we can't use the interrupt to
	 * wait here since it will fire too soon. We could use the
	 * frame start interrupt instead since it will fire after the
	 * critical scanline, but that would require more changes
	 * in the interrupt code. So for now we'll just do the nasty
	 * thing and poll for the bad scanline to pass us by.
	 *
	 * FIXME figure out if BXT+ DSI suffers from this as well
	 */
	while (need_vlv_dsi_wa && scanline == vblank_start)
		scanline = intel_get_crtc_scanline(crtc);

	crtc->debug.scanline_start = scanline;
	crtc->debug.start_vbl_time = ktime_get();
	crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);

	trace_intel_pipe_update_vblank_evaded(crtc);
	return;

irq_disable:
	local_irq_disable();
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
{
	u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
	unsigned int h;

	h = ilog2(delta >> 9);
	if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
		h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
	crtc->debug.vbl.times[h]++;

	crtc->debug.vbl.sum += delta;
	if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
		crtc->debug.vbl.min = delta;
	if (delta > crtc->debug.vbl.max)
		crtc->debug.vbl.max = delta;

	if (delta > 1000 * VBLANK_EVASION_TIME_US) {
		drm_dbg_kms(crtc->base.dev,
			    "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
			    pipe_name(crtc->pipe),
			    div_u64(delta, 1000),
			    VBLANK_EVASION_TIME_US);
		crtc->debug.vbl.over++;
	}
}
#else
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
#endif

/**
 * intel_pipe_update_end() - end update of a set of display registers
 * @new_crtc_state: the new crtc state
 *
 * Mark the end of an update started with intel_pipe_update_start(). This
 * re-enables interrupts and verifies the update was actually completed
 * before a vblank.
 */
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	int scanline_end = intel_get_crtc_scanline(crtc);
	u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
	ktime_t end_vbl_time = ktime_get();
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (new_crtc_state->uapi.async_flip)
		return;

	trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);

	/*
	 * In case of MIPI DSI command mode, we need to set frame update
	 * request for every commit.
	 */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
		icl_dsi_frame_update(new_crtc_state);

	/*
	 * We're still in the vblank-evade critical section, this can't race.
	 * Would be slightly nice to just grab the vblank count and arm the
	 * event outside of the critical section - the spinlock might spin
	 * for a while ...
	 */
	if (new_crtc_state->uapi.event) {
		drm_WARN_ON(&dev_priv->drm,
			    drm_crtc_vblank_get(&crtc->base) != 0);

		spin_lock(&crtc->base.dev->event_lock);
		drm_crtc_arm_vblank_event(&crtc->base,
					  new_crtc_state->uapi.event);
		spin_unlock(&crtc->base.dev->event_lock);

		new_crtc_state->uapi.event = NULL;
	}

	local_irq_enable();

	/* Send VRR Push to terminate Vblank */
	intel_vrr_send_push(new_crtc_state);

	if (intel_vgpu_active(dev_priv))
		return;

	if (crtc->debug.start_vbl_count &&
	    crtc->debug.start_vbl_count != end_vbl_count) {
		drm_err(&dev_priv->drm,
			"Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
			pipe_name(pipe), crtc->debug.start_vbl_count,
			end_vbl_count,
			ktime_us_delta(end_vbl_time,
				       crtc->debug.start_vbl_time),
			crtc->debug.min_vbl, crtc->debug.max_vbl,
			crtc->debug.scanline_start, scanline_end);
	}

	dbg_vblank_evade(crtc, end_vbl_time);
}