/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_audio.h"
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_drrs.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_snps_phy.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
#include "display/intel_vrr.h"

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "gt/gen8_ppgtt.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_drv.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_dpt.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @dev_priv: i915 device
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 * The normal calculation is:
 *   watermark = dotclock * bytes per pixel * latency
 * where latency is platform & configuration dependent (we assume pessimal
 * values here).
 *
 * The SR calculation is:
 *   watermark = (trunc(latency/line time) + 1) * surface width *
 *		 bytes per pixel
 * where
 *   line time = htotal / dotclock
 *   surface width = hdisplay for normal plane and 64 for cursor
 * and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
static void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
	if (dev_priv->wm_disp->update_wm)
		dev_priv->wm_disp->update_wm(dev_priv);
}

static int intel_compute_pipe_wm(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (dev_priv->wm_disp->compute_pipe_wm)
		return dev_priv->wm_disp->compute_pipe_wm(state, crtc);

	return 0;
}

static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (!dev_priv->wm_disp->compute_intermediate_wm)
		return 0;

	if (drm_WARN_ON(&dev_priv->drm,
			!dev_priv->wm_disp->compute_pipe_wm))
		return 0;

	return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
}

static bool intel_initial_watermarks(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (dev_priv->wm_disp->initial_watermarks) {
		dev_priv->wm_disp->initial_watermarks(state, crtc);
		return true;
	}

	return false;
}

static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
					   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (dev_priv->wm_disp->atomic_update_watermarks)
		dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
}

static void intel_optimize_watermarks(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (dev_priv->wm_disp->optimize_watermarks)
		dev_priv->wm_disp->optimize_watermarks(state, crtc);
}

static int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (dev_priv->wm_disp->compute_global_watermarks)
		return dev_priv->wm_disp->compute_global_watermarks(state);

	return 0;
}

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}
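
/*
 * Worked example (illustrative numbers only): with an 800 MHz HPLL VCO
 * (fuse value 0 above) and a CCK divider field reading 3, the derived
 * clock is DIV_ROUND_CLOSEST(800000 << 1, 3 + 1) = 400000 kHz, i.e. the
 * reference is doubled and then divided by (divider + 1).
 */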

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->bigjoiner_pipes) - 1;
}

u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_pipes)
		return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe != bigjoiner_master_pipe(crtc_state);
}

bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe == bigjoiner_master_pipe(crtc_state);
}

struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}
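
/*
 * Example (illustrative): bigjoiner_pipes == BIT(PIPE_A) | BIT(PIPE_B)
 * makes pipe A the master (lowest set bit, via ffs() above) and leaves
 * BIT(PIPE_B) as the slave mask, so the pipe B CRTC state resolves its
 * master CRTC to pipe A through intel_master_crtc().
 */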

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;

	line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
					    PIPECONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
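
/*
 * Note on the PORT_C case above (an observation, not from any bspec text
 * quoted in this file): the port C readiness field sits four bits above
 * the port B field in DPLL(0), hence expected_mask <<= 4 to line the
 * caller's value up with DPLL_PORTC_READY_MASK.
 */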

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}

bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc &&
		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}
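
/*
 * Worked example (hypothetical values): x = 100, y = 50 with cpp = 4 and
 * a mapping stride of 8192 bytes gives 50 * 8192 + 100 * 4 = 410000 bytes
 * from the start of the GTT mapping.
 */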

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * if pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			struct intel_plane_state *plane_state,
			bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}
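
/*
 * Example (illustrative): if the uapi plane_mask selects the drm planes
 * currently bound as primary and cursor, the loop above rebuilds
 * enabled_planes/active_planes as BIT(PLANE_PRIMARY) | BIT(PLANE_CURSOR),
 * translating the device-wide drm plane ids back into per-pipe plane ids.
 */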

void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);

	return ret;
}

static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
		intel_has_gpu_reset(to_gt(dev_priv)));
}

void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(to_gt(dev_priv));
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}

	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending CRTCs failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}

void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);
		intel_hpd_init(dev_priv);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA #1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *master_crtc;
	int num_encoders = 0;
	int i;

	master_crtc = intel_master_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &master_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(encoder->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(master_crtc->pipe));

	return encoder;
}

static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/*
	 * Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
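
/*
 * Example (illustrative): a 1920x1080 panel fitter window at 0,0 packs as
 * PF_WIN_POS = 0 << 16 | 0 and PF_WIN_SZ = 1920 << 16 | 1080; the x
 * position and width live in the high halves of the respective registers.
 */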

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/*
	 * Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N stretch max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && intel_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
		new_crtc_state->active_planes;
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->active_planes &&
		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
}
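
/*
 * In other words: going from zero to some active planes, or any plane
 * update that is part of a modeset, counts as "planes enabling", while
 * the symmetric predicate treats the transition down to zero active
 * planes (or a modeset teardown) as "planes disabling".
 */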

static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	hsw_ips_post_update(state, crtc);
	intel_fbc_post_update(state, crtc);
	intel_drrs_page_flip(state, crtc);

	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}
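
/*
 * intel_pre_plane_update() below and intel_post_plane_update() above
 * deliberately mirror each other: each workaround is armed before the
 * plane update when its needs_*_wa() predicate goes from false to true,
 * and disarmed afterwards on the opposite transition, so it is active
 * for exactly the frames that need it.
 */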

static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}

/*
 * intel_connector_primary_encoder - get the primary encoder for a connector
 * @connector: connector for which to return the encoder
 *
 * Returns the primary encoder for a connector. There is a 1:1 mapping from
 * all connectors to their encoder, except for DP-MST connectors which have
 * both a virtual and a primary encoder. These DP-MST primary encoders can be
 * pointed to by as many DP-MST connectors as there are pipes.
 */
static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector *connector)
{
	struct intel_encoder *encoder;

	if (connector->mst_port)
		return &dp_to_dig_port(connector->mst_port)->base;

	encoder = intel_attached_encoder(connector);
	drm_WARN_ON(connector->base.dev, !encoder);

	return encoder;
}

static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}

	if (!state->modeset)
		return;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_prepare)
			continue;

		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_prepare(state, encoder, crtc);
	}
}

static void intel_encoders_update_complete(struct intel_atomic_state *state)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	if (!state->modeset)
		return;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_complete)
			continue;

		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_complete(state, encoder, crtc);
	}
}

static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}
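
/*
 * The helpers above and below fan an atomic commit out to the optional
 * per-connector encoder hooks. On the enable side the CRTC enable
 * functions invoke them in the fixed order pre_pll_enable -> pre_enable
 * -> enable, with PLL and pipe programming interleaved in between (see
 * e.g. ilk_crtc_enable() and hsw_crtc_enable() below); the disable side
 * runs the mirror-image sequence.
 */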
old_conn_state); 1659 } 1660 } 1661 1662 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state, 1663 struct intel_crtc *crtc) 1664 { 1665 const struct intel_crtc_state *old_crtc_state = 1666 intel_atomic_get_old_crtc_state(state, crtc); 1667 const struct drm_connector_state *old_conn_state; 1668 struct drm_connector *conn; 1669 int i; 1670 1671 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 1672 struct intel_encoder *encoder = 1673 to_intel_encoder(old_conn_state->best_encoder); 1674 1675 if (old_conn_state->crtc != &crtc->base) 1676 continue; 1677 1678 if (encoder->post_pll_disable) 1679 encoder->post_pll_disable(state, encoder, 1680 old_crtc_state, old_conn_state); 1681 } 1682 } 1683 1684 static void intel_encoders_update_pipe(struct intel_atomic_state *state, 1685 struct intel_crtc *crtc) 1686 { 1687 const struct intel_crtc_state *crtc_state = 1688 intel_atomic_get_new_crtc_state(state, crtc); 1689 const struct drm_connector_state *conn_state; 1690 struct drm_connector *conn; 1691 int i; 1692 1693 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1694 struct intel_encoder *encoder = 1695 to_intel_encoder(conn_state->best_encoder); 1696 1697 if (conn_state->crtc != &crtc->base) 1698 continue; 1699 1700 if (encoder->update_pipe) 1701 encoder->update_pipe(state, encoder, 1702 crtc_state, conn_state); 1703 } 1704 } 1705 1706 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) 1707 { 1708 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1709 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 1710 1711 plane->disable_arm(plane, crtc_state); 1712 } 1713 1714 static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 1715 { 1716 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1717 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1718 1719 if (crtc_state->has_pch_encoder) { 1720 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1721 &crtc_state->fdi_m_n); 1722 } else if (intel_crtc_has_dp_encoder(crtc_state)) { 1723 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1724 &crtc_state->dp_m_n); 1725 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 1726 &crtc_state->dp_m2_n2); 1727 } 1728 1729 intel_set_transcoder_timings(crtc_state); 1730 1731 ilk_set_pipeconf(crtc_state); 1732 } 1733 1734 static void ilk_crtc_enable(struct intel_atomic_state *state, 1735 struct intel_crtc *crtc) 1736 { 1737 const struct intel_crtc_state *new_crtc_state = 1738 intel_atomic_get_new_crtc_state(state, crtc); 1739 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1740 enum pipe pipe = crtc->pipe; 1741 1742 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 1743 return; 1744 1745 /* 1746 * Sometimes spurious CPU pipe underruns happen during FDI 1747 * training, at least with VGA+HDMI cloning. Suppress them. 1748 * 1749 * On ILK we get an occasional spurious CPU pipe underruns 1750 * between eDP port A enable and vdd enable. Also PCH port 1751 * enable seems to result in the occasional CPU pipe underrun. 1752 * 1753 * Spurious PCH underruns also occur during PCH enabling. 

static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. PCH port enable
	 * also seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
					    enum pipe pipe, bool apply)
{
	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	if (apply)
		val |= mask;
	else
		val &= ~mask;

	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
}

static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	/* Wa_22010947358:adl-p */
	if (IS_ALDERLAKE_P(dev_priv))
		val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
	else
		val = MBUS_DBOX_A_CREDIT(2);

	if (DISPLAY_VER(dev_priv) >= 12) {
		val |= MBUS_DBOX_BW_CREDIT(2);
		val |= MBUS_DBOX_B_CREDIT(12);
	} else {
		val |= MBUS_DBOX_BW_CREDIT(1);
		val |= MBUS_DBOX_B_CREDIT(8);
	}

	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
}

static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
		       HSW_LINETIME(crtc_state->linetime) |
		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
}

static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
	u32 val;

	val = intel_de_read(dev_priv, reg);
	val &= ~HSW_FRAME_START_DELAY_MASK;
	val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, reg, val);
}

static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);

	/*
	 * Enable sequence steps 1-7 on bigjoiner master
	 */
	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_pll_enable(state, master_crtc);

	if (crtc_state->shared_dpll)
		intel_enable_shared_dpll(crtc_state);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_enable(state, master_crtc);
}

static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP)
		intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
			       crtc_state->pixel_multiplier - 1);

	hsw_set_frame_start_delay(crtc_state);

	hsw_set_transconf(crtc_state);
}
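
/*
 * Note on the PIPE_MULT write above: the register holds the pixel
 * multiplier minus one, so e.g. a multiplier of 2 is programmed as 1,
 * and the common multiplier of 1 is programmed as 0.
 */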

static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_dsc_enable(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 13)
		intel_uncompressed_joiner_enable(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
	    !transcoder_is_dsi(cpu_transcoder))
		hsw_configure_cpu_transcoder(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	intel_initial_watermarks(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 11) {
		const struct intel_dbuf_state *dbuf_state =
			intel_atomic_get_new_dbuf_state(state);

		icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
	}

	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (psl_clkgate_wa) {
		intel_crtc_wait_for_next_vblank(crtc);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		struct intel_crtc *wa_crtc;

		wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

		intel_crtc_wait_for_next_vblank(wa_crtc);
		intel_crtc_wait_for_next_vblank(wa_crtc);
	}
}

void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on Haswell, only disable the pfit
	 * if it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write(dev_priv, PF_CTL(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}

static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_disable(state, crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_post_disable(state, crtc);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
		intel_encoders_disable(state, crtc);
		intel_encoders_post_disable(state, crtc);
	}
}

static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
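
/*
 * Rough PHY-type map encoded by the three predicates below (derived from
 * the platform checks themselves, not from bspec): every PHY is either a
 * "combo" PHY, a Type-C capable PHY, or (on DG2) a Synopsys SNPS PHY,
 * and which PHY ranges land in which bucket varies per platform.
 */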

bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;
	else if (IS_DG2(dev_priv))
		/*
		 * DG2 outputs labelled as "combo PHY" in the bspec use
		 * SNPS PHYs with completely different programming,
		 * hence we always return false here.
		 */
		return false;
	else if (IS_ALDERLAKE_S(dev_priv))
		return phy <= PHY_E;
	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
		return phy <= PHY_D;
	else if (IS_JSL_EHL(dev_priv))
		return phy <= PHY_C;
	else if (DISPLAY_VER(dev_priv) >= 11)
		return phy <= PHY_B;
	else
		return false;
}

bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (IS_DG2(dev_priv))
		/* DG2's "TC1" output uses a SNPS PHY */
		return false;
	else if (IS_ALDERLAKE_P(dev_priv))
		return phy >= PHY_F && phy <= PHY_I;
	else if (IS_TIGERLAKE(dev_priv))
		return phy >= PHY_D && phy <= PHY_I;
	else if (IS_ICELAKE(dev_priv))
		return phy >= PHY_C && phy <= PHY_F;
	else
		return false;
}

bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;
	else if (IS_DG2(dev_priv))
		/*
		 * All four "combo" ports and the TC1 port (PHY E) use
		 * Synopsys PHYs.
		 */
		return phy <= PHY_E;

	return false;
}

enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
		return PHY_D + port - PORT_D_XELPD;
	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
		return PHY_F + port - PORT_TC1;
	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
		return PHY_B + port - PORT_TC1;
	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
		return PHY_C + port - PORT_TC1;
	else if (IS_JSL_EHL(i915) && port == PORT_D)
		return PHY_A;

	return PHY_A + port - PORT_A;
}

enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
		return TC_PORT_NONE;

	if (DISPLAY_VER(dev_priv) >= 12)
		return TC_PORT_1 + port - PORT_TC1;
	else
		return TC_PORT_1 + port - PORT_C;
}
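
/*
 * Illustrative examples of the mappings above, derived from the checks
 * rather than bspec: on DG1/RKL, PORT_TC1 maps to PHY_C, and since those
 * PHYs are not Type-C capable, intel_port_to_tc() returns TC_PORT_NONE
 * for them; on TGL (display version 12), PORT_TC1 maps to TC_PORT_1.
 */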

enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}

/*
 * Converts aux_ch to power_domain without caring about TBT ports; for
 * TBT ports use intel_aux_power_domain() instead.
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	case AUX_CH_H:
		return POWER_DOMAIN_AUX_H;
	case AUX_CH_I:
		return POWER_DOMAIN_AUX_I;
	default:
		MISSING_CASE(aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}

static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	if (crtc_state->dsc.compression_enable)
		mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));

	return mask;
}

static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	domains = get_crtc_power_domains(crtc_state);

	new_domains = domains & ~crtc->enabled_power_domains.mask;
	old_domains = crtc->enabled_power_domains.mask & ~domains;

	for_each_power_domain(domain, new_domains)
		intel_display_power_get_in_set(dev_priv,
					       &crtc->enabled_power_domains,
					       domain);

	return old_domains;
}

static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					   u64 domains)
{
	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
					    &crtc->enabled_power_domains,
					    domains);
}
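
/*
 * Usage note: modeset_get_crtc_power_domains() grabs references for any
 * domains the new state needs and returns the set of domains only the
 * old state needed; the caller is expected to hand that mask back to
 * modeset_put_crtc_power_domains() once the hardware has been
 * reprogrammed, so the references are held across the transition.
 */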

static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	i9xx_set_pipeconf(crtc_state);
}

static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}

static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(dev_priv);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);
}

static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}

static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->wm_disp->initial_watermarks)
		intel_update_watermarks(dev_priv);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
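
/*
 * intel_crtc_disable_noatomic() below tears a CRTC down outside of a
 * normal atomic commit (used when sanitizing inconsistent hardware
 * state): it disables the planes, builds a throwaway atomic state just
 * to drive the disable hooks, and then manually clears the cdclk, dbuf
 * and bw bookkeeping for the pipe by hand.
 */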

static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	if (!crtc_state->hw.active)
		return;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(dev_priv);
	intel_disable_shared_dpll(crtc_state);

	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}

/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		dev_priv->modeset_restore_state = state;
	return ret;
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/*
 * Cross check the actual hw state with our own modeset state tracking (and
 * its internal consistency).
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
				"connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
				"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
				"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
				"best encoder set without crtc!\n");
	}
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return DISPLAY_VER(dev_priv) < 4 &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
	struct drm_rect src;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	drm_rect_init(&src, 0, 0,
		      crtc_state->pipe_src_w << 16,
		      crtc_state->pipe_src_h << 16);

	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
				   pixel_rate);
}

static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
					 const struct drm_display_mode *timings)
{
	mode->hdisplay = timings->crtc_hdisplay;
	mode->htotal = timings->crtc_htotal;
	mode->hsync_start = timings->crtc_hsync_start;
	mode->hsync_end = timings->crtc_hsync_end;

	mode->vdisplay = timings->crtc_vdisplay;
	mode->vtotal = timings->crtc_vtotal;
	mode->vsync_start = timings->crtc_vsync_start;
	mode->vsync_end = timings->crtc_vsync_end;

	mode->flags = timings->flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = timings->crtc_clock;

	drm_mode_set_name(mode);
}

static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (HAS_GMCH(dev_priv))
		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
		crtc_state->pixel_rate =
			crtc_state->hw.pipe_mode.crtc_clock;
	else
		crtc_state->pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);
}
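
/*
 * Worked example for ilk_pipe_pixel_rate() above (illustrative numbers,
 * assuming intel_adjusted_rate() scales by the src/dst ratio in each
 * direction): a 148500 kHz mode with the pch pfit downscaling a
 * 3840x2160 source into a 1920x1080 window doubles the effective rate
 * per axis, i.e. 148500 * 2 * 2 = 594000 kHz.
 */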

static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	if (crtc_state->splitter.enable) {
		int n = crtc_state->splitter.link_count;
		int overlap = crtc_state->splitter.pixel_overlap;

		/*
		 * eDP MSO uses segment timings from EDID for transcoder
		 * timings, but full mode for everything else.
		 *
		 * h_full = (h_segment - pixel_overlap) * link_count
		 */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;

		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
	} else {
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
	}

	intel_crtc_compute_pixel_rate(crtc_state);

	drm_mode_copy(mode, adjusted_mode);
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}

static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
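
/*
 * Illustrative example of the MSO splitter maths used both in the
 * readout above and in intel_crtc_compute_config() below: with
 * link_count = 2 and pixel_overlap = 8, a 1928 pixel wide segment
 * expands to a (1928 - 8) * 2 = 3840 pixel wide full mode, with the
 * pixel clock scaled up by the same link_count factor.
 */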

static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	if (pipe_config->splitter.enable) {
		int n = pipe_config->splitter.link_count;
		int overlap = pipe_config->splitter.pixel_overlap;

		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}

static void
intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void compute_m_n(unsigned int m, unsigned int n,
			u32 *ret_m, u32 *ret_n,
			bool constant_n)
{
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Use 0x8000 as the N value, which
	 * should be acceptable to such devices: 0x8000 is the
	 * specified fixed N value for asynchronous clock mode,
	 * which the devices also expect in synchronous clock mode.
	 */
	if (constant_n)
		*ret_n = DP_LINK_CONSTANT_N_VALUE;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->data_m, &m_n->data_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
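
/*
 * Worked example for intel_link_compute_m_n() (illustrative numbers
 * only): 24 bpp at a 148500 kHz pixel clock over 4 lanes at 270000 kHz
 * link clock gives a data M/N ratio of 3564000 : 8640000 and a link M/N
 * ratio of 148500 : 270000; compute_m_n() then picks the N value (0x8000
 * when constant_n is set) and scales/reduces M accordingly so both
 * values fit in DATA_LINK_M_N_MASK.
 */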

static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}

void intel_zero_m_n(struct intel_link_m_n *m_n)
{
	/* corresponds to 0 register value */
	memset(m_n, 0, sizeof(*m_n));
	m_n->tu = 1;
}

void intel_set_m_n(struct drm_i915_private *i915,
		   const struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(i915, data_n_reg, m_n->data_n);
	intel_de_write(i915, link_m_reg, m_n->link_m);
	/*
	 * On BDW+ writing LINK_N arms the double buffered update
	 * of all the M/N registers, so it must be written last.
	 */
	intel_de_write(i915, link_n_reg, m_n->link_n);
}

bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
				    enum transcoder transcoder)
{
	if (IS_HASWELL(dev_priv))
		return transcoder == TRANSCODER_EDP;

	return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
}

void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5)
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
	else
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}

void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_set_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}
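
/*
 * Note: M2/N2 is a second, alternate set of link M/N values that only
 * exists on some platforms (see intel_cpu_transcoder_has_m2_n2() above);
 * e.g. seamless DRRS relies on it to switch between two refresh rates
 * without a full modeset. On everything else the set/get M2/N2 helpers
 * are deliberately no-ops.
 */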

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
}
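
/*
 * Register packing example for the timing writes above (illustrative):
 * each register holds (start - 1) in the low 16 bits and (end - 1) in
 * the high 16 bits, e.g. HTOTAL holds (hdisplay - 1) | ((htotal - 1)
 * << 16), so a 1920 wide mode with an htotal of 2200 is written as
 * 0x0897077f.
 */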

static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(crtc_state->pipe_src_w - 1) |
		       PIPESRC_HEIGHT(crtc_state->pipe_src_h - 1));
}

static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) == 2)
		return false;

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
	else
		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
}

static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
			(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
			((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
			(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
			((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}

static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
	pipe_config->pipe_src_w = REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1;
	pipe_config->pipe_src_h = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1;
}
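
/*
 * The two readout helpers above are the inverse of
 * intel_set_transcoder_timings() and intel_set_pipe_src_size(): they
 * decode the same (value - 1) register packing back into timings, and
 * re-apply the interlace halfline adjustment so the hw state checker
 * compares like with like.
 */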

static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_BPC_6;
			break;
		case 24:
			pipeconf |= PIPECONF_BPC_8;
			break;
		case 30:
			pipeconf |= PIPECONF_BPC_10;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}

static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return DISPLAY_VER(dev_priv) >= 4 ||
		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (DISPLAY_VER(dev_priv) < 4) {
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}

static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
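
/*
 * As with the VLV readout above, the CHV variant below only decodes the
 * DPIO divider fields and leaves the dot clock arithmetic to
 * chv_calc_dpll_params(); note the early return in both, since the DPLL
 * is bypassed entirely for DSI.
 */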

static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}

static enum intel_output_format
bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	if (tmp & PIPEMISC_YUV420_ENABLE) {
		/* We support 4:2:0 in full blend mode only */
		drm_WARN_ON(&dev_priv->drm,
			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);

		return INTEL_OUTPUT_FORMAT_YCBCR420;
	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	} else {
		return INTEL_OUTPUT_FORMAT_RGB;
	}
}

static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 tmp;

	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	if (tmp & DISP_PIPE_GAMMA_ENABLE)
		crtc_state->gamma_enable = true;

	if (!HAS_GMCH(dev_priv) &&
	    tmp & DISP_PIPE_CSC_ENABLE)
		crtc_state->csc_enable = true;
}

static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_BPC_6:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_BPC_8:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_BPC_10:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			MISSING_CASE(tmp);
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3512 */ 3513 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && 3514 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 3515 3516 if (crtc_state->limited_color_range && 3517 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3518 val |= PIPECONF_COLOR_RANGE_SELECT; 3519 3520 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3521 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; 3522 3523 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 3524 3525 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 3526 3527 intel_de_write(dev_priv, PIPECONF(pipe), val); 3528 intel_de_posting_read(dev_priv, PIPECONF(pipe)); 3529 } 3530 3531 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) 3532 { 3533 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3534 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3535 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3536 u32 val = 0; 3537 3538 if (IS_HASWELL(dev_priv) && crtc_state->dither) 3539 val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; 3540 3541 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3542 val |= PIPECONF_INTERLACE_IF_ID_ILK; 3543 else 3544 val |= PIPECONF_INTERLACE_PF_PD_ILK; 3545 3546 if (IS_HASWELL(dev_priv) && 3547 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3548 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; 3549 3550 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); 3551 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder)); 3552 } 3553 3554 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) 3555 { 3556 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3557 const struct intel_crtc_scaler_state *scaler_state = 3558 &crtc_state->scaler_state; 3559 3560 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3561 u32 val = 0; 3562 int i; 3563 3564 switch (crtc_state->pipe_bpp) { 3565 case 18: 3566 val |= PIPEMISC_BPC_6; 3567 break; 3568 case 24: 3569 val |= PIPEMISC_BPC_8; 3570 break; 3571 case 30: 3572 val |= PIPEMISC_BPC_10; 3573 break; 3574 case 36: 3575 /* Port output 12BPC defined for ADLP+ */ 3576 if (DISPLAY_VER(dev_priv) > 12) 3577 val |= PIPEMISC_BPC_12_ADLP; 3578 break; 3579 default: 3580 MISSING_CASE(crtc_state->pipe_bpp); 3581 break; 3582 } 3583 3584 if (crtc_state->dither) 3585 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 3586 3587 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 3588 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 3589 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; 3590 3591 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 3592 val |= PIPEMISC_YUV420_ENABLE | 3593 PIPEMISC_YUV420_MODE_FULL_BLEND; 3594 3595 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state)) 3596 val |= PIPEMISC_HDR_MODE_PRECISION; 3597 3598 if (DISPLAY_VER(dev_priv) >= 12) 3599 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC; 3600 3601 if (IS_ALDERLAKE_P(dev_priv)) { 3602 bool scaler_in_use = false; 3603 3604 for (i = 0; i < crtc->num_scalers; i++) { 3605 if (!scaler_state->scalers[i].in_use) 3606 continue; 3607 3608 scaler_in_use = true; 3609 break; 3610 } 3611 3612 intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe), 3613 PIPE_MISC2_BUBBLE_COUNTER_MASK, 3614 scaler_in_use ? 
PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN : 3615 PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS); 3616 } 3617 3618 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val); 3619 } 3620 3621 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) 3622 { 3623 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3624 u32 tmp; 3625 3626 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); 3627 3628 switch (tmp & PIPEMISC_BPC_MASK) { 3629 case PIPEMISC_BPC_6: 3630 return 18; 3631 case PIPEMISC_BPC_8: 3632 return 24; 3633 case PIPEMISC_BPC_10: 3634 return 30; 3635 /* 3636 * PORT OUTPUT 12 BPC defined for ADLP+. 3637 * 3638 * TODO: 3639 * For previous platforms with DSI interface, bits 5:7 3640 * are used for storing pipe_bpp irrespective of dithering. 3641 * Since the value of 12 BPC is not defined for these bits 3642 * on older platforms, need to find a workaround for 12 BPC 3643 * MIPI DSI HW readout. 3644 */ 3645 case PIPEMISC_BPC_12_ADLP: 3646 if (DISPLAY_VER(dev_priv) > 12) 3647 return 36; 3648 fallthrough; 3649 default: 3650 MISSING_CASE(tmp); 3651 return 0; 3652 } 3653 } 3654 3655 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) 3656 { 3657 /* 3658 * Account for spread spectrum to avoid 3659 * oversubscribing the link. Max center spread 3660 * is 2.5%; use 5% for safety's sake. 3661 */ 3662 u32 bps = target_clock * bpp * 21 / 20; 3663 return DIV_ROUND_UP(bps, link_bw * 8); 3664 } 3665 3666 void intel_get_m_n(struct drm_i915_private *i915, 3667 struct intel_link_m_n *m_n, 3668 i915_reg_t data_m_reg, i915_reg_t data_n_reg, 3669 i915_reg_t link_m_reg, i915_reg_t link_n_reg) 3670 { 3671 m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK; 3672 m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK; 3673 m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK; 3674 m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK; 3675 m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; 3676 } 3677 3678 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, 3679 enum transcoder transcoder, 3680 struct intel_link_m_n *m_n) 3681 { 3682 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3683 enum pipe pipe = crtc->pipe; 3684 3685 if (DISPLAY_VER(dev_priv) >= 5) 3686 intel_get_m_n(dev_priv, m_n, 3687 PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), 3688 PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); 3689 else 3690 intel_get_m_n(dev_priv, m_n, 3691 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 3692 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 3693 } 3694 3695 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, 3696 enum transcoder transcoder, 3697 struct intel_link_m_n *m_n) 3698 { 3699 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3700 3701 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) 3702 return; 3703 3704 intel_get_m_n(dev_priv, m_n, 3705 PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), 3706 PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); 3707 } 3708 3709 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state, 3710 u32 pos, u32 size) 3711 { 3712 drm_rect_init(&crtc_state->pch_pfit.dst, 3713 pos >> 16, pos & 0xffff, 3714 size >> 16, size & 0xffff); 3715 } 3716 3717 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state) 3718 { 3719 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3720 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3721 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; 
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}

static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiate them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}

static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_BPC_6:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_BPC_8:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_BPC_10:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_BPC_12:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

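	/*
	 * No pipe-level pixel multiplier is read out here (it is
	 * assumed to be 1 below); the PCH/FDI side of the state, if
	 * any, is recovered by ilk_pch_get_config().
	 */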
pipe_config->pixel_multiplier = 1; 3840 3841 ilk_pch_get_config(pipe_config); 3842 3843 intel_get_transcoder_timings(crtc, pipe_config); 3844 intel_get_pipe_src_size(crtc, pipe_config); 3845 3846 ilk_get_pfit_config(pipe_config); 3847 3848 ret = true; 3849 3850 out: 3851 intel_display_power_put(dev_priv, power_domain, wakeref); 3852 3853 return ret; 3854 } 3855 3856 static u8 bigjoiner_pipes(struct drm_i915_private *i915) 3857 { 3858 if (DISPLAY_VER(i915) >= 12) 3859 return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); 3860 else if (DISPLAY_VER(i915) >= 11) 3861 return BIT(PIPE_B) | BIT(PIPE_C); 3862 else 3863 return 0; 3864 } 3865 3866 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, 3867 enum transcoder cpu_transcoder) 3868 { 3869 enum intel_display_power_domain power_domain; 3870 intel_wakeref_t wakeref; 3871 u32 tmp = 0; 3872 3873 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3874 3875 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 3876 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); 3877 3878 return tmp & TRANS_DDI_FUNC_ENABLE; 3879 } 3880 3881 static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv, 3882 u8 *master_pipes, u8 *slave_pipes) 3883 { 3884 struct intel_crtc *crtc; 3885 3886 *master_pipes = 0; 3887 *slave_pipes = 0; 3888 3889 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, 3890 bigjoiner_pipes(dev_priv)) { 3891 enum intel_display_power_domain power_domain; 3892 enum pipe pipe = crtc->pipe; 3893 intel_wakeref_t wakeref; 3894 3895 power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); 3896 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 3897 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 3898 3899 if (!(tmp & BIG_JOINER_ENABLE)) 3900 continue; 3901 3902 if (tmp & MASTER_BIG_JOINER_ENABLE) 3903 *master_pipes |= BIT(pipe); 3904 else 3905 *slave_pipes |= BIT(pipe); 3906 } 3907 3908 if (DISPLAY_VER(dev_priv) < 13) 3909 continue; 3910 3911 power_domain = POWER_DOMAIN_PIPE(pipe); 3912 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 3913 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 3914 3915 if (tmp & UNCOMPRESSED_JOINER_MASTER) 3916 *master_pipes |= BIT(pipe); 3917 if (tmp & UNCOMPRESSED_JOINER_SLAVE) 3918 *slave_pipes |= BIT(pipe); 3919 } 3920 } 3921 3922 /* Bigjoiner pipes should always be consecutive master and slave */ 3923 drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1, 3924 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n", 3925 *master_pipes, *slave_pipes); 3926 } 3927 3928 static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes) 3929 { 3930 if ((slave_pipes & BIT(pipe)) == 0) 3931 return pipe; 3932 3933 /* ignore everything above our pipe */ 3934 master_pipes &= ~GENMASK(7, pipe); 3935 3936 /* highest remaining bit should be our master pipe */ 3937 return fls(master_pipes) - 1; 3938 } 3939 3940 static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes) 3941 { 3942 enum pipe master_pipe, next_master_pipe; 3943 3944 master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes); 3945 3946 if ((master_pipes & BIT(master_pipe)) == 0) 3947 return 0; 3948 3949 /* ignore our master pipe and everything below it */ 3950 master_pipes &= ~GENMASK(master_pipe, 0); 3951 /* make sure a high bit is set for the ffs() */ 3952 master_pipes |= BIT(7); 3953 /* lowest remaining bit should be the next 
master pipe */ 3954 next_master_pipe = ffs(master_pipes) - 1; 3955 3956 return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe); 3957 } 3958 3959 static u8 hsw_panel_transcoders(struct drm_i915_private *i915) 3960 { 3961 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP); 3962 3963 if (DISPLAY_VER(i915) >= 11) 3964 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 3965 3966 return panel_transcoder_mask; 3967 } 3968 3969 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) 3970 { 3971 struct drm_device *dev = crtc->base.dev; 3972 struct drm_i915_private *dev_priv = to_i915(dev); 3973 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); 3974 enum transcoder cpu_transcoder; 3975 u8 master_pipes, slave_pipes; 3976 u8 enabled_transcoders = 0; 3977 3978 /* 3979 * XXX: Do intel_display_power_get_if_enabled before reading this (for 3980 * consistency and less surprising code; it's in always on power). 3981 */ 3982 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, 3983 panel_transcoder_mask) { 3984 enum intel_display_power_domain power_domain; 3985 intel_wakeref_t wakeref; 3986 enum pipe trans_pipe; 3987 u32 tmp = 0; 3988 3989 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3990 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 3991 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); 3992 3993 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 3994 continue; 3995 3996 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 3997 default: 3998 drm_WARN(dev, 1, 3999 "unknown pipe linked to transcoder %s\n", 4000 transcoder_name(cpu_transcoder)); 4001 fallthrough; 4002 case TRANS_DDI_EDP_INPUT_A_ONOFF: 4003 case TRANS_DDI_EDP_INPUT_A_ON: 4004 trans_pipe = PIPE_A; 4005 break; 4006 case TRANS_DDI_EDP_INPUT_B_ONOFF: 4007 trans_pipe = PIPE_B; 4008 break; 4009 case TRANS_DDI_EDP_INPUT_C_ONOFF: 4010 trans_pipe = PIPE_C; 4011 break; 4012 case TRANS_DDI_EDP_INPUT_D_ONOFF: 4013 trans_pipe = PIPE_D; 4014 break; 4015 } 4016 4017 if (trans_pipe == crtc->pipe) 4018 enabled_transcoders |= BIT(cpu_transcoder); 4019 } 4020 4021 /* single pipe or bigjoiner master */ 4022 cpu_transcoder = (enum transcoder) crtc->pipe; 4023 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 4024 enabled_transcoders |= BIT(cpu_transcoder); 4025 4026 /* bigjoiner slave -> consider the master pipe's transcoder as well */ 4027 enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes); 4028 if (slave_pipes & BIT(crtc->pipe)) { 4029 cpu_transcoder = (enum transcoder) 4030 get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes); 4031 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 4032 enabled_transcoders |= BIT(cpu_transcoder); 4033 } 4034 4035 return enabled_transcoders; 4036 } 4037 4038 static bool has_edp_transcoders(u8 enabled_transcoders) 4039 { 4040 return enabled_transcoders & BIT(TRANSCODER_EDP); 4041 } 4042 4043 static bool has_dsi_transcoders(u8 enabled_transcoders) 4044 { 4045 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) | 4046 BIT(TRANSCODER_DSI_1)); 4047 } 4048 4049 static bool has_pipe_transcoders(u8 enabled_transcoders) 4050 { 4051 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) | 4052 BIT(TRANSCODER_DSI_0) | 4053 BIT(TRANSCODER_DSI_1)); 4054 } 4055 4056 static void assert_enabled_transcoders(struct drm_i915_private *i915, 4057 u8 enabled_transcoders) 4058 { 4059 /* Only one type of transcoder please */ 4060 drm_WARN_ON(&i915->drm, 4061 has_edp_transcoders(enabled_transcoders) + 4062 
has_dsi_transcoders(enabled_transcoders) + 4063 has_pipe_transcoders(enabled_transcoders) > 1); 4064 4065 /* Only DSI transcoders can be ganged */ 4066 drm_WARN_ON(&i915->drm, 4067 !has_dsi_transcoders(enabled_transcoders) && 4068 !is_power_of_2(enabled_transcoders)); 4069 } 4070 4071 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 4072 struct intel_crtc_state *pipe_config, 4073 struct intel_display_power_domain_set *power_domain_set) 4074 { 4075 struct drm_device *dev = crtc->base.dev; 4076 struct drm_i915_private *dev_priv = to_i915(dev); 4077 unsigned long enabled_transcoders; 4078 u32 tmp; 4079 4080 enabled_transcoders = hsw_enabled_transcoders(crtc); 4081 if (!enabled_transcoders) 4082 return false; 4083 4084 assert_enabled_transcoders(dev_priv, enabled_transcoders); 4085 4086 /* 4087 * With the exception of DSI we should only ever have 4088 * a single enabled transcoder. With DSI let's just 4089 * pick the first one. 4090 */ 4091 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1; 4092 4093 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 4094 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 4095 return false; 4096 4097 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) { 4098 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 4099 4100 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF) 4101 pipe_config->pch_pfit.force_thru = true; 4102 } 4103 4104 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder)); 4105 4106 return tmp & PIPECONF_ENABLE; 4107 } 4108 4109 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 4110 struct intel_crtc_state *pipe_config, 4111 struct intel_display_power_domain_set *power_domain_set) 4112 { 4113 struct drm_device *dev = crtc->base.dev; 4114 struct drm_i915_private *dev_priv = to_i915(dev); 4115 enum transcoder cpu_transcoder; 4116 enum port port; 4117 u32 tmp; 4118 4119 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 4120 if (port == PORT_A) 4121 cpu_transcoder = TRANSCODER_DSI_A; 4122 else 4123 cpu_transcoder = TRANSCODER_DSI_C; 4124 4125 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 4126 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) 4127 continue; 4128 4129 /* 4130 * The PLL needs to be enabled with a valid divider 4131 * configuration, otherwise accessing DSI registers will hang 4132 * the machine. See BSpec North Display Engine 4133 * registers/MIPI[BXT]. We can break out here early, since we 4134 * need the same DSI PLL to be enabled for both DSI ports. 
4135 */ 4136 if (!bxt_dsi_pll_is_enabled(dev_priv)) 4137 break; 4138 4139 /* XXX: this works for video mode only */ 4140 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); 4141 if (!(tmp & DPI_ENABLE)) 4142 continue; 4143 4144 tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); 4145 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 4146 continue; 4147 4148 pipe_config->cpu_transcoder = cpu_transcoder; 4149 break; 4150 } 4151 4152 return transcoder_is_dsi(pipe_config->cpu_transcoder); 4153 } 4154 4155 static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state) 4156 { 4157 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4158 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4159 u8 master_pipes, slave_pipes; 4160 enum pipe pipe = crtc->pipe; 4161 4162 enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes); 4163 4164 if (((master_pipes | slave_pipes) & BIT(pipe)) == 0) 4165 return; 4166 4167 crtc_state->bigjoiner = true; 4168 crtc_state->bigjoiner_pipes = 4169 BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) | 4170 get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes); 4171 } 4172 4173 static bool hsw_get_pipe_config(struct intel_crtc *crtc, 4174 struct intel_crtc_state *pipe_config) 4175 { 4176 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4177 struct intel_display_power_domain_set power_domain_set = { }; 4178 bool active; 4179 u32 tmp; 4180 4181 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set, 4182 POWER_DOMAIN_PIPE(crtc->pipe))) 4183 return false; 4184 4185 pipe_config->shared_dpll = NULL; 4186 4187 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set); 4188 4189 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4190 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) { 4191 drm_WARN_ON(&dev_priv->drm, active); 4192 active = true; 4193 } 4194 4195 if (!active) 4196 goto out; 4197 4198 intel_dsc_get_config(pipe_config); 4199 intel_bigjoiner_get_config(pipe_config); 4200 4201 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 4202 DISPLAY_VER(dev_priv) >= 11) 4203 intel_get_transcoder_timings(crtc, pipe_config); 4204 4205 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) 4206 intel_vrr_get_config(crtc, pipe_config); 4207 4208 intel_get_pipe_src_size(crtc, pipe_config); 4209 4210 if (IS_HASWELL(dev_priv)) { 4211 u32 tmp = intel_de_read(dev_priv, 4212 PIPECONF(pipe_config->cpu_transcoder)); 4213 4214 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) 4215 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 4216 else 4217 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 4218 } else { 4219 pipe_config->output_format = 4220 bdw_get_pipemisc_output_format(crtc); 4221 } 4222 4223 pipe_config->gamma_mode = intel_de_read(dev_priv, 4224 GAMMA_MODE(crtc->pipe)); 4225 4226 pipe_config->csc_mode = intel_de_read(dev_priv, 4227 PIPE_CSC_MODE(crtc->pipe)); 4228 4229 if (DISPLAY_VER(dev_priv) >= 9) { 4230 tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe)); 4231 4232 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) 4233 pipe_config->gamma_enable = true; 4234 4235 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) 4236 pipe_config->csc_enable = true; 4237 } else { 4238 i9xx_get_pipe_color_config(pipe_config); 4239 } 4240 4241 intel_color_get_config(pipe_config); 4242 4243 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); 4244 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); 4245 if (IS_BROADWELL(dev_priv) 
|| IS_HASWELL(dev_priv)) 4246 pipe_config->ips_linetime = 4247 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); 4248 4249 if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set, 4250 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { 4251 if (DISPLAY_VER(dev_priv) >= 9) 4252 skl_get_pfit_config(pipe_config); 4253 else 4254 ilk_get_pfit_config(pipe_config); 4255 } 4256 4257 hsw_ips_get_config(pipe_config); 4258 4259 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 4260 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 4261 pipe_config->pixel_multiplier = 4262 intel_de_read(dev_priv, 4263 PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 4264 } else { 4265 pipe_config->pixel_multiplier = 1; 4266 } 4267 4268 out: 4269 intel_display_power_put_all_in_set(dev_priv, &power_domain_set); 4270 4271 return active; 4272 } 4273 4274 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) 4275 { 4276 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4277 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4278 4279 if (!i915->display->get_pipe_config(crtc, crtc_state)) 4280 return false; 4281 4282 crtc_state->hw.active = true; 4283 4284 intel_crtc_readout_derived_state(crtc_state); 4285 4286 return true; 4287 } 4288 4289 /* VESA 640x480x72Hz mode to set on the pipe */ 4290 static const struct drm_display_mode load_detect_mode = { 4291 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 4292 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 4293 }; 4294 4295 static int intel_modeset_disable_planes(struct drm_atomic_state *state, 4296 struct drm_crtc *crtc) 4297 { 4298 struct drm_plane *plane; 4299 struct drm_plane_state *plane_state; 4300 int ret, i; 4301 4302 ret = drm_atomic_add_affected_planes(state, crtc); 4303 if (ret) 4304 return ret; 4305 4306 for_each_new_plane_in_state(state, plane, plane_state, i) { 4307 if (plane_state->crtc != crtc) 4308 continue; 4309 4310 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 4311 if (ret) 4312 return ret; 4313 4314 drm_atomic_set_fb_for_plane(plane_state, NULL); 4315 } 4316 4317 return 0; 4318 } 4319 4320 int intel_get_load_detect_pipe(struct drm_connector *connector, 4321 struct intel_load_detect_pipe *old, 4322 struct drm_modeset_acquire_ctx *ctx) 4323 { 4324 struct intel_encoder *encoder = 4325 intel_attached_encoder(to_intel_connector(connector)); 4326 struct intel_crtc *possible_crtc; 4327 struct intel_crtc *crtc = NULL; 4328 struct drm_device *dev = encoder->base.dev; 4329 struct drm_i915_private *dev_priv = to_i915(dev); 4330 struct drm_mode_config *config = &dev->mode_config; 4331 struct drm_atomic_state *state = NULL, *restore_state = NULL; 4332 struct drm_connector_state *connector_state; 4333 struct intel_crtc_state *crtc_state; 4334 int ret; 4335 4336 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 4337 connector->base.id, connector->name, 4338 encoder->base.base.id, encoder->base.name); 4339 4340 old->restore_state = NULL; 4341 4342 drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex)); 4343 4344 /* 4345 * Algorithm gets a little messy: 4346 * 4347 * - if the connector already has an assigned crtc, use it (but make 4348 * sure it's on first) 4349 * 4350 * - try to find the first unused crtc that can drive this connector, 4351 * and use that if we find one 4352 */ 4353 4354 /* See if we already have a CRTC for this connector */ 4355 if (connector->state->crtc) { 4356 crtc = to_intel_crtc(connector->state->crtc); 4357 4358 
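		/*
		 * drm_modeset_lock() may return -EDEADLK when lock
		 * contention against another acquire ctx is detected;
		 * that error is passed straight back to the caller (see
		 * the fail: path below), which is expected to back off
		 * and retry the whole load-detect sequence.
		 */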
ret = drm_modeset_lock(&crtc->base.mutex, ctx); 4359 if (ret) 4360 goto fail; 4361 4362 /* Make sure the crtc and connector are running */ 4363 goto found; 4364 } 4365 4366 /* Find an unused one (if possible) */ 4367 for_each_intel_crtc(dev, possible_crtc) { 4368 if (!(encoder->base.possible_crtcs & 4369 drm_crtc_mask(&possible_crtc->base))) 4370 continue; 4371 4372 ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx); 4373 if (ret) 4374 goto fail; 4375 4376 if (possible_crtc->base.state->enable) { 4377 drm_modeset_unlock(&possible_crtc->base.mutex); 4378 continue; 4379 } 4380 4381 crtc = possible_crtc; 4382 break; 4383 } 4384 4385 /* 4386 * If we didn't find an unused CRTC, don't use any. 4387 */ 4388 if (!crtc) { 4389 drm_dbg_kms(&dev_priv->drm, 4390 "no pipe available for load-detect\n"); 4391 ret = -ENODEV; 4392 goto fail; 4393 } 4394 4395 found: 4396 state = drm_atomic_state_alloc(dev); 4397 restore_state = drm_atomic_state_alloc(dev); 4398 if (!state || !restore_state) { 4399 ret = -ENOMEM; 4400 goto fail; 4401 } 4402 4403 state->acquire_ctx = ctx; 4404 restore_state->acquire_ctx = ctx; 4405 4406 connector_state = drm_atomic_get_connector_state(state, connector); 4407 if (IS_ERR(connector_state)) { 4408 ret = PTR_ERR(connector_state); 4409 goto fail; 4410 } 4411 4412 ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base); 4413 if (ret) 4414 goto fail; 4415 4416 crtc_state = intel_atomic_get_crtc_state(state, crtc); 4417 if (IS_ERR(crtc_state)) { 4418 ret = PTR_ERR(crtc_state); 4419 goto fail; 4420 } 4421 4422 crtc_state->uapi.active = true; 4423 4424 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi, 4425 &load_detect_mode); 4426 if (ret) 4427 goto fail; 4428 4429 ret = intel_modeset_disable_planes(state, &crtc->base); 4430 if (ret) 4431 goto fail; 4432 4433 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 4434 if (!ret) 4435 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base)); 4436 if (!ret) 4437 ret = drm_atomic_add_affected_planes(restore_state, &crtc->base); 4438 if (ret) { 4439 drm_dbg_kms(&dev_priv->drm, 4440 "Failed to create a copy of old state to restore: %i\n", 4441 ret); 4442 goto fail; 4443 } 4444 4445 ret = drm_atomic_commit(state); 4446 if (ret) { 4447 drm_dbg_kms(&dev_priv->drm, 4448 "failed to set mode on load-detect pipe\n"); 4449 goto fail; 4450 } 4451 4452 old->restore_state = restore_state; 4453 drm_atomic_state_put(state); 4454 4455 /* let the connector get through one full cycle before testing */ 4456 intel_crtc_wait_for_next_vblank(crtc); 4457 4458 return true; 4459 4460 fail: 4461 if (state) { 4462 drm_atomic_state_put(state); 4463 state = NULL; 4464 } 4465 if (restore_state) { 4466 drm_atomic_state_put(restore_state); 4467 restore_state = NULL; 4468 } 4469 4470 if (ret == -EDEADLK) 4471 return ret; 4472 4473 return false; 4474 } 4475 4476 void intel_release_load_detect_pipe(struct drm_connector *connector, 4477 struct intel_load_detect_pipe *old, 4478 struct drm_modeset_acquire_ctx *ctx) 4479 { 4480 struct intel_encoder *intel_encoder = 4481 intel_attached_encoder(to_intel_connector(connector)); 4482 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev); 4483 struct drm_encoder *encoder = &intel_encoder->base; 4484 struct drm_atomic_state *state = old->restore_state; 4485 int ret; 4486 4487 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 4488 connector->base.id, connector->name, 4489 encoder->base.id, encoder->name); 4490 4491 if (!state) 4492 return; 4493 
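	/*
	 * Replay the state duplicated in intel_get_load_detect_pipe()
	 * to undo the load-detect modeset; on failure there is nothing
	 * sensible left to do, so the error is only logged.
	 */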
4494 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 4495 if (ret) 4496 drm_dbg_kms(&i915->drm, 4497 "Couldn't release load detect pipe: %i\n", ret); 4498 drm_atomic_state_put(state); 4499 } 4500 4501 static int i9xx_pll_refclk(struct drm_device *dev, 4502 const struct intel_crtc_state *pipe_config) 4503 { 4504 struct drm_i915_private *dev_priv = to_i915(dev); 4505 u32 dpll = pipe_config->dpll_hw_state.dpll; 4506 4507 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 4508 return dev_priv->vbt.lvds_ssc_freq; 4509 else if (HAS_PCH_SPLIT(dev_priv)) 4510 return 120000; 4511 else if (DISPLAY_VER(dev_priv) != 2) 4512 return 96000; 4513 else 4514 return 48000; 4515 } 4516 4517 /* Returns the clock of the currently programmed mode of the given pipe. */ 4518 void i9xx_crtc_clock_get(struct intel_crtc *crtc, 4519 struct intel_crtc_state *pipe_config) 4520 { 4521 struct drm_device *dev = crtc->base.dev; 4522 struct drm_i915_private *dev_priv = to_i915(dev); 4523 u32 dpll = pipe_config->dpll_hw_state.dpll; 4524 u32 fp; 4525 struct dpll clock; 4526 int port_clock; 4527 int refclk = i9xx_pll_refclk(dev, pipe_config); 4528 4529 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 4530 fp = pipe_config->dpll_hw_state.fp0; 4531 else 4532 fp = pipe_config->dpll_hw_state.fp1; 4533 4534 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 4535 if (IS_PINEVIEW(dev_priv)) { 4536 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 4537 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 4538 } else { 4539 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 4540 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 4541 } 4542 4543 if (DISPLAY_VER(dev_priv) != 2) { 4544 if (IS_PINEVIEW(dev_priv)) 4545 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 4546 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 4547 else 4548 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 4549 DPLL_FPA01_P1_POST_DIV_SHIFT); 4550 4551 switch (dpll & DPLL_MODE_MASK) { 4552 case DPLLB_MODE_DAC_SERIAL: 4553 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 4554 5 : 10; 4555 break; 4556 case DPLLB_MODE_LVDS: 4557 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 4558 7 : 14; 4559 break; 4560 default: 4561 drm_dbg_kms(&dev_priv->drm, 4562 "Unknown DPLL mode %08x in programmed " 4563 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 4564 return; 4565 } 4566 4567 if (IS_PINEVIEW(dev_priv)) 4568 port_clock = pnv_calc_dpll_params(refclk, &clock); 4569 else 4570 port_clock = i9xx_calc_dpll_params(refclk, &clock); 4571 } else { 4572 enum pipe lvds_pipe; 4573 4574 if (IS_I85X(dev_priv) && 4575 intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) && 4576 lvds_pipe == crtc->pipe) { 4577 u32 lvds = intel_de_read(dev_priv, LVDS); 4578 4579 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 4580 DPLL_FPA01_P1_POST_DIV_SHIFT); 4581 4582 if (lvds & LVDS_CLKB_POWER_UP) 4583 clock.p2 = 7; 4584 else 4585 clock.p2 = 14; 4586 } else { 4587 if (dpll & PLL_P1_DIVIDE_BY_TWO) 4588 clock.p1 = 2; 4589 else { 4590 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 4591 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 4592 } 4593 if (dpll & PLL_P2_DIVIDE_BY_4) 4594 clock.p2 = 4; 4595 else 4596 clock.p2 = 2; 4597 } 4598 4599 port_clock = i9xx_calc_dpll_params(refclk, &clock); 4600 } 4601 4602 /* 4603 * This value includes pixel_multiplier. We will use 4604 * port_clock to compute adjusted_mode.crtc_clock in the 4605 * encoder's get_config() function. 
 */
	pipe_config->port_clock = port_clock;
}

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the calculation using the link M/N values is simpler:
	 * pixel_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}

/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	enum pipe pipe;

	if (!encoder->get_hw_state(encoder, &pipe))
		return NULL;

	crtc = intel_crtc_for_pipe(dev_priv, pipe);

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(mode);
		return NULL;
	}

	if (!intel_crtc_get_pipe_config(crtc_state)) {
		kfree(crtc_state);
		kfree(mode);
		return NULL;
	}

	intel_encoder_get_config(encoder, crtc_state);

	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);

	kfree(crtc_state);

	return mode;
}

static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

static bool check_single_encoder_cloning(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}

static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state
*state = to_intel_atomic_state(crtc_state->uapi.state); 4729 struct intel_plane *plane, *linked; 4730 struct intel_plane_state *plane_state; 4731 int i; 4732 4733 if (DISPLAY_VER(dev_priv) < 11) 4734 return 0; 4735 4736 /* 4737 * Destroy all old plane links and make the slave plane invisible 4738 * in the crtc_state->active_planes mask. 4739 */ 4740 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4741 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 4742 continue; 4743 4744 plane_state->planar_linked_plane = NULL; 4745 if (plane_state->planar_slave && !plane_state->uapi.visible) { 4746 crtc_state->enabled_planes &= ~BIT(plane->id); 4747 crtc_state->active_planes &= ~BIT(plane->id); 4748 crtc_state->update_planes |= BIT(plane->id); 4749 } 4750 4751 plane_state->planar_slave = false; 4752 } 4753 4754 if (!crtc_state->nv12_planes) 4755 return 0; 4756 4757 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4758 struct intel_plane_state *linked_state = NULL; 4759 4760 if (plane->pipe != crtc->pipe || 4761 !(crtc_state->nv12_planes & BIT(plane->id))) 4762 continue; 4763 4764 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 4765 if (!icl_is_nv12_y_plane(dev_priv, linked->id)) 4766 continue; 4767 4768 if (crtc_state->active_planes & BIT(linked->id)) 4769 continue; 4770 4771 linked_state = intel_atomic_get_plane_state(state, linked); 4772 if (IS_ERR(linked_state)) 4773 return PTR_ERR(linked_state); 4774 4775 break; 4776 } 4777 4778 if (!linked_state) { 4779 drm_dbg_kms(&dev_priv->drm, 4780 "Need %d free Y planes for planar YUV\n", 4781 hweight8(crtc_state->nv12_planes)); 4782 4783 return -EINVAL; 4784 } 4785 4786 plane_state->planar_linked_plane = linked; 4787 4788 linked_state->planar_slave = true; 4789 linked_state->planar_linked_plane = plane; 4790 crtc_state->enabled_planes |= BIT(linked->id); 4791 crtc_state->active_planes |= BIT(linked->id); 4792 crtc_state->update_planes |= BIT(linked->id); 4793 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", 4794 linked->base.name, plane->base.name); 4795 4796 /* Copy parameters to slave plane */ 4797 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 4798 linked_state->color_ctl = plane_state->color_ctl; 4799 linked_state->view = plane_state->view; 4800 linked_state->decrypt = plane_state->decrypt; 4801 4802 intel_plane_copy_hw_state(linked_state, plane_state); 4803 linked_state->uapi.src = plane_state->uapi.src; 4804 linked_state->uapi.dst = plane_state->uapi.dst; 4805 4806 if (icl_is_hdr_plane(dev_priv, plane->id)) { 4807 if (linked->id == PLANE_SPRITE5) 4808 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL; 4809 else if (linked->id == PLANE_SPRITE4) 4810 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL; 4811 else if (linked->id == PLANE_SPRITE3) 4812 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL; 4813 else if (linked->id == PLANE_SPRITE2) 4814 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL; 4815 else 4816 MISSING_CASE(linked->id); 4817 } 4818 } 4819 4820 return 0; 4821 } 4822 4823 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 4824 { 4825 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 4826 struct intel_atomic_state *state = 4827 to_intel_atomic_state(new_crtc_state->uapi.state); 4828 const struct intel_crtc_state *old_crtc_state = 4829 intel_atomic_get_old_crtc_state(state, crtc); 4830 4831 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 4832 } 4833 4834 static u16 hsw_linetime_wm(const struct 
intel_crtc_state *crtc_state) 4835 { 4836 const struct drm_display_mode *pipe_mode = 4837 &crtc_state->hw.pipe_mode; 4838 int linetime_wm; 4839 4840 if (!crtc_state->hw.enable) 4841 return 0; 4842 4843 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4844 pipe_mode->crtc_clock); 4845 4846 return min(linetime_wm, 0x1ff); 4847 } 4848 4849 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, 4850 const struct intel_cdclk_state *cdclk_state) 4851 { 4852 const struct drm_display_mode *pipe_mode = 4853 &crtc_state->hw.pipe_mode; 4854 int linetime_wm; 4855 4856 if (!crtc_state->hw.enable) 4857 return 0; 4858 4859 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4860 cdclk_state->logical.cdclk); 4861 4862 return min(linetime_wm, 0x1ff); 4863 } 4864 4865 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) 4866 { 4867 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4868 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4869 const struct drm_display_mode *pipe_mode = 4870 &crtc_state->hw.pipe_mode; 4871 int linetime_wm; 4872 4873 if (!crtc_state->hw.enable) 4874 return 0; 4875 4876 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8, 4877 crtc_state->pixel_rate); 4878 4879 /* Display WA #1135: BXT:ALL GLK:ALL */ 4880 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4881 dev_priv->ipc_enabled) 4882 linetime_wm /= 2; 4883 4884 return min(linetime_wm, 0x1ff); 4885 } 4886 4887 static int hsw_compute_linetime_wm(struct intel_atomic_state *state, 4888 struct intel_crtc *crtc) 4889 { 4890 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4891 struct intel_crtc_state *crtc_state = 4892 intel_atomic_get_new_crtc_state(state, crtc); 4893 const struct intel_cdclk_state *cdclk_state; 4894 4895 if (DISPLAY_VER(dev_priv) >= 9) 4896 crtc_state->linetime = skl_linetime_wm(crtc_state); 4897 else 4898 crtc_state->linetime = hsw_linetime_wm(crtc_state); 4899 4900 if (!hsw_crtc_supports_ips(crtc)) 4901 return 0; 4902 4903 cdclk_state = intel_atomic_get_cdclk_state(state); 4904 if (IS_ERR(cdclk_state)) 4905 return PTR_ERR(cdclk_state); 4906 4907 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, 4908 cdclk_state); 4909 4910 return 0; 4911 } 4912 4913 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 4914 struct intel_crtc *crtc) 4915 { 4916 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4917 struct intel_crtc_state *crtc_state = 4918 intel_atomic_get_new_crtc_state(state, crtc); 4919 bool mode_changed = intel_crtc_needs_modeset(crtc_state); 4920 int ret; 4921 4922 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) && 4923 mode_changed && !crtc_state->hw.active) 4924 crtc_state->update_wm_post = true; 4925 4926 if (mode_changed && crtc_state->hw.enable && 4927 !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { 4928 ret = intel_dpll_crtc_compute_clock(crtc_state); 4929 if (ret) 4930 return ret; 4931 } 4932 4933 /* 4934 * May need to update pipe gamma enable bits 4935 * when C8 planes are getting enabled/disabled. 
4936 */ 4937 if (c8_planes_changed(crtc_state)) 4938 crtc_state->uapi.color_mgmt_changed = true; 4939 4940 if (mode_changed || crtc_state->update_pipe || 4941 crtc_state->uapi.color_mgmt_changed) { 4942 ret = intel_color_check(crtc_state); 4943 if (ret) 4944 return ret; 4945 } 4946 4947 ret = intel_compute_pipe_wm(state, crtc); 4948 if (ret) { 4949 drm_dbg_kms(&dev_priv->drm, 4950 "Target pipe watermarks are invalid\n"); 4951 return ret; 4952 } 4953 4954 /* 4955 * Calculate 'intermediate' watermarks that satisfy both the 4956 * old state and the new state. We can program these 4957 * immediately. 4958 */ 4959 ret = intel_compute_intermediate_wm(state, crtc); 4960 if (ret) { 4961 drm_dbg_kms(&dev_priv->drm, 4962 "No valid intermediate pipe watermarks are possible\n"); 4963 return ret; 4964 } 4965 4966 if (DISPLAY_VER(dev_priv) >= 9) { 4967 if (mode_changed || crtc_state->update_pipe) { 4968 ret = skl_update_scaler_crtc(crtc_state); 4969 if (ret) 4970 return ret; 4971 } 4972 4973 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state); 4974 if (ret) 4975 return ret; 4976 } 4977 4978 if (HAS_IPS(dev_priv)) { 4979 ret = hsw_ips_compute_config(state, crtc); 4980 if (ret) 4981 return ret; 4982 } 4983 4984 if (DISPLAY_VER(dev_priv) >= 9 || 4985 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 4986 ret = hsw_compute_linetime_wm(state, crtc); 4987 if (ret) 4988 return ret; 4989 4990 } 4991 4992 ret = intel_psr2_sel_fetch_update(state, crtc); 4993 if (ret) 4994 return ret; 4995 4996 return 0; 4997 } 4998 4999 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 5000 { 5001 struct intel_connector *connector; 5002 struct drm_connector_list_iter conn_iter; 5003 5004 drm_connector_list_iter_begin(dev, &conn_iter); 5005 for_each_intel_connector_iter(connector, &conn_iter) { 5006 struct drm_connector_state *conn_state = connector->base.state; 5007 struct intel_encoder *encoder = 5008 to_intel_encoder(connector->base.encoder); 5009 5010 if (conn_state->crtc) 5011 drm_connector_put(&connector->base); 5012 5013 if (encoder) { 5014 struct intel_crtc *crtc = 5015 to_intel_crtc(encoder->base.crtc); 5016 const struct intel_crtc_state *crtc_state = 5017 to_intel_crtc_state(crtc->base.state); 5018 5019 conn_state->best_encoder = &encoder->base; 5020 conn_state->crtc = &crtc->base; 5021 conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3; 5022 5023 drm_connector_get(&connector->base); 5024 } else { 5025 conn_state->best_encoder = NULL; 5026 conn_state->crtc = NULL; 5027 } 5028 } 5029 drm_connector_list_iter_end(&conn_iter); 5030 } 5031 5032 static int 5033 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 5034 struct intel_crtc_state *pipe_config) 5035 { 5036 struct drm_connector *connector = conn_state->connector; 5037 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); 5038 const struct drm_display_info *info = &connector->display_info; 5039 int bpp; 5040 5041 switch (conn_state->max_bpc) { 5042 case 6 ... 7: 5043 bpp = 6 * 3; 5044 break; 5045 case 8 ... 9: 5046 bpp = 8 * 3; 5047 break; 5048 case 10 ... 11: 5049 bpp = 10 * 3; 5050 break; 5051 case 12 ... 
16: 5052 bpp = 12 * 3; 5053 break; 5054 default: 5055 MISSING_CASE(conn_state->max_bpc); 5056 return -EINVAL; 5057 } 5058 5059 if (bpp < pipe_config->pipe_bpp) { 5060 drm_dbg_kms(&i915->drm, 5061 "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " 5062 "EDID bpp %d, requested bpp %d, max platform bpp %d\n", 5063 connector->base.id, connector->name, 5064 bpp, 3 * info->bpc, 5065 3 * conn_state->max_requested_bpc, 5066 pipe_config->pipe_bpp); 5067 5068 pipe_config->pipe_bpp = bpp; 5069 } 5070 5071 return 0; 5072 } 5073 5074 static int 5075 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 5076 struct intel_crtc_state *pipe_config) 5077 { 5078 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5079 struct drm_atomic_state *state = pipe_config->uapi.state; 5080 struct drm_connector *connector; 5081 struct drm_connector_state *connector_state; 5082 int bpp, i; 5083 5084 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 5085 IS_CHERRYVIEW(dev_priv))) 5086 bpp = 10*3; 5087 else if (DISPLAY_VER(dev_priv) >= 5) 5088 bpp = 12*3; 5089 else 5090 bpp = 8*3; 5091 5092 pipe_config->pipe_bpp = bpp; 5093 5094 /* Clamp display bpp to connector max bpp */ 5095 for_each_new_connector_in_state(state, connector, connector_state, i) { 5096 int ret; 5097 5098 if (connector_state->crtc != &crtc->base) 5099 continue; 5100 5101 ret = compute_sink_pipe_bpp(connector_state, pipe_config); 5102 if (ret) 5103 return ret; 5104 } 5105 5106 return 0; 5107 } 5108 5109 static void intel_dump_crtc_timings(struct drm_i915_private *i915, 5110 const struct drm_display_mode *mode) 5111 { 5112 drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, " 5113 "type: 0x%x flags: 0x%x\n", 5114 mode->crtc_clock, 5115 mode->crtc_hdisplay, mode->crtc_hsync_start, 5116 mode->crtc_hsync_end, mode->crtc_htotal, 5117 mode->crtc_vdisplay, mode->crtc_vsync_start, 5118 mode->crtc_vsync_end, mode->crtc_vtotal, 5119 mode->type, mode->flags); 5120 } 5121 5122 static void 5123 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, 5124 const char *id, unsigned int lane_count, 5125 const struct intel_link_m_n *m_n) 5126 { 5127 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); 5128 5129 drm_dbg_kms(&i915->drm, 5130 "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n", 5131 id, lane_count, 5132 m_n->data_m, m_n->data_n, 5133 m_n->link_m, m_n->link_n, m_n->tu); 5134 } 5135 5136 static void 5137 intel_dump_infoframe(struct drm_i915_private *dev_priv, 5138 const union hdmi_infoframe *frame) 5139 { 5140 if (!drm_debug_enabled(DRM_UT_KMS)) 5141 return; 5142 5143 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); 5144 } 5145 5146 static void 5147 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv, 5148 const struct drm_dp_vsc_sdp *vsc) 5149 { 5150 if (!drm_debug_enabled(DRM_UT_KMS)) 5151 return; 5152 5153 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc); 5154 } 5155 5156 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x 5157 5158 static const char * const output_type_str[] = { 5159 OUTPUT_TYPE(UNUSED), 5160 OUTPUT_TYPE(ANALOG), 5161 OUTPUT_TYPE(DVO), 5162 OUTPUT_TYPE(SDVO), 5163 OUTPUT_TYPE(LVDS), 5164 OUTPUT_TYPE(TVOUT), 5165 OUTPUT_TYPE(HDMI), 5166 OUTPUT_TYPE(DP), 5167 OUTPUT_TYPE(EDP), 5168 OUTPUT_TYPE(DSI), 5169 OUTPUT_TYPE(DDI), 5170 OUTPUT_TYPE(DP_MST), 5171 }; 5172 5173 #undef OUTPUT_TYPE 5174 5175 static void snprintf_output_types(char *buf, size_t len, 5176 unsigned int output_types) 5177 { 5178 char *str = buf; 5179 int i; 5180 5181 str[0] = '\0'; 
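	/*
	 * Build a comma separated list of the set output_types bits,
	 * e.g. (illustrative values) 0x42 == BIT(INTEL_OUTPUT_ANALOG) |
	 * BIT(INTEL_OUTPUT_HDMI) would yield "ANALOG,HDMI". Bits are
	 * cleared as they are consumed, so any unknown leftovers trip
	 * the WARN_ON_ONCE() after the loop.
	 */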
5182 5183 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) { 5184 int r; 5185 5186 if ((output_types & BIT(i)) == 0) 5187 continue; 5188 5189 r = snprintf(str, len, "%s%s", 5190 str != buf ? "," : "", output_type_str[i]); 5191 if (r >= len) 5192 break; 5193 str += r; 5194 len -= r; 5195 5196 output_types &= ~BIT(i); 5197 } 5198 5199 WARN_ON_ONCE(output_types != 0); 5200 } 5201 5202 static const char * const output_format_str[] = { 5203 [INTEL_OUTPUT_FORMAT_RGB] = "RGB", 5204 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", 5205 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", 5206 }; 5207 5208 static const char *output_formats(enum intel_output_format format) 5209 { 5210 if (format >= ARRAY_SIZE(output_format_str)) 5211 return "invalid"; 5212 return output_format_str[format]; 5213 } 5214 5215 static void intel_dump_plane_state(const struct intel_plane_state *plane_state) 5216 { 5217 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 5218 struct drm_i915_private *i915 = to_i915(plane->base.dev); 5219 const struct drm_framebuffer *fb = plane_state->hw.fb; 5220 5221 if (!fb) { 5222 drm_dbg_kms(&i915->drm, 5223 "[PLANE:%d:%s] fb: [NOFB], visible: %s\n", 5224 plane->base.base.id, plane->base.name, 5225 yesno(plane_state->uapi.visible)); 5226 return; 5227 } 5228 5229 drm_dbg_kms(&i915->drm, 5230 "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n", 5231 plane->base.base.id, plane->base.name, 5232 fb->base.id, fb->width, fb->height, &fb->format->format, 5233 fb->modifier, yesno(plane_state->uapi.visible)); 5234 drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n", 5235 plane_state->hw.rotation, plane_state->scaler_id); 5236 if (plane_state->uapi.visible) 5237 drm_dbg_kms(&i915->drm, 5238 "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", 5239 DRM_RECT_FP_ARG(&plane_state->uapi.src), 5240 DRM_RECT_ARG(&plane_state->uapi.dst)); 5241 } 5242 5243 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, 5244 struct intel_atomic_state *state, 5245 const char *context) 5246 { 5247 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 5248 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5249 const struct intel_plane_state *plane_state; 5250 struct intel_plane *plane; 5251 char buf[64]; 5252 int i; 5253 5254 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n", 5255 crtc->base.base.id, crtc->base.name, 5256 yesno(pipe_config->hw.enable), context); 5257 5258 if (!pipe_config->hw.enable) 5259 goto dump_planes; 5260 5261 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); 5262 drm_dbg_kms(&dev_priv->drm, 5263 "active: %s, output_types: %s (0x%x), output format: %s\n", 5264 yesno(pipe_config->hw.active), 5265 buf, pipe_config->output_types, 5266 output_formats(pipe_config->output_format)); 5267 5268 drm_dbg_kms(&dev_priv->drm, 5269 "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 5270 transcoder_name(pipe_config->cpu_transcoder), 5271 pipe_config->pipe_bpp, pipe_config->dither); 5272 5273 drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n", 5274 transcoder_name(pipe_config->mst_master_transcoder)); 5275 5276 drm_dbg_kms(&dev_priv->drm, 5277 "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n", 5278 transcoder_name(pipe_config->master_transcoder), 5279 pipe_config->sync_mode_slaves_mask); 5280 5281 drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s, pipes: 0x%x\n", 5282 intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" : 5283 intel_crtc_is_bigjoiner_master(pipe_config) ? 
"master" : "no", 5284 pipe_config->bigjoiner_pipes); 5285 5286 drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n", 5287 enableddisabled(pipe_config->splitter.enable), 5288 pipe_config->splitter.link_count, 5289 pipe_config->splitter.pixel_overlap); 5290 5291 if (pipe_config->has_pch_encoder) 5292 intel_dump_m_n_config(pipe_config, "fdi", 5293 pipe_config->fdi_lanes, 5294 &pipe_config->fdi_m_n); 5295 5296 if (intel_crtc_has_dp_encoder(pipe_config)) { 5297 intel_dump_m_n_config(pipe_config, "dp m_n", 5298 pipe_config->lane_count, 5299 &pipe_config->dp_m_n); 5300 intel_dump_m_n_config(pipe_config, "dp m2_n2", 5301 pipe_config->lane_count, 5302 &pipe_config->dp_m2_n2); 5303 } 5304 5305 drm_dbg_kms(&dev_priv->drm, 5306 "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", 5307 pipe_config->has_audio, pipe_config->has_infoframe, 5308 pipe_config->infoframes.enable); 5309 5310 if (pipe_config->infoframes.enable & 5311 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) 5312 drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n", 5313 pipe_config->infoframes.gcp); 5314 if (pipe_config->infoframes.enable & 5315 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) 5316 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); 5317 if (pipe_config->infoframes.enable & 5318 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) 5319 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd); 5320 if (pipe_config->infoframes.enable & 5321 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) 5322 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); 5323 if (pipe_config->infoframes.enable & 5324 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM)) 5325 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm); 5326 if (pipe_config->infoframes.enable & 5327 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA)) 5328 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm); 5329 if (pipe_config->infoframes.enable & 5330 intel_hdmi_infoframe_enable(DP_SDP_VSC)) 5331 intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc); 5332 5333 drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n", 5334 yesno(pipe_config->vrr.enable), 5335 pipe_config->vrr.vmin, pipe_config->vrr.vmax, 5336 pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband, 5337 pipe_config->vrr.flipline, 5338 intel_vrr_vmin_vblank_start(pipe_config), 5339 intel_vrr_vmax_vblank_start(pipe_config)); 5340 5341 drm_dbg_kms(&dev_priv->drm, "requested mode:\n"); 5342 drm_mode_debug_printmodeline(&pipe_config->hw.mode); 5343 drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n"); 5344 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode); 5345 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode); 5346 drm_dbg_kms(&dev_priv->drm, "pipe mode:\n"); 5347 drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode); 5348 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode); 5349 drm_dbg_kms(&dev_priv->drm, 5350 "port clock: %d, pipe src size: %dx%d, pixel rate %d\n", 5351 pipe_config->port_clock, 5352 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 5353 pipe_config->pixel_rate); 5354 5355 drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n", 5356 pipe_config->linetime, pipe_config->ips_linetime); 5357 5358 if (DISPLAY_VER(dev_priv) >= 9) 5359 drm_dbg_kms(&dev_priv->drm, 5360 "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 5361 crtc->num_scalers, 5362 
pipe_config->scaler_state.scaler_users, 5363 pipe_config->scaler_state.scaler_id); 5364 5365 if (HAS_GMCH(dev_priv)) 5366 drm_dbg_kms(&dev_priv->drm, 5367 "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 5368 pipe_config->gmch_pfit.control, 5369 pipe_config->gmch_pfit.pgm_ratios, 5370 pipe_config->gmch_pfit.lvds_border_bits); 5371 else 5372 drm_dbg_kms(&dev_priv->drm, 5373 "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n", 5374 DRM_RECT_ARG(&pipe_config->pch_pfit.dst), 5375 enableddisabled(pipe_config->pch_pfit.enabled), 5376 yesno(pipe_config->pch_pfit.force_thru)); 5377 5378 drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n", 5379 pipe_config->ips_enabled, pipe_config->double_wide); 5380 5381 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); 5382 5383 if (IS_CHERRYVIEW(dev_priv)) 5384 drm_dbg_kms(&dev_priv->drm, 5385 "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 5386 pipe_config->cgm_mode, pipe_config->gamma_mode, 5387 pipe_config->gamma_enable, pipe_config->csc_enable); 5388 else 5389 drm_dbg_kms(&dev_priv->drm, 5390 "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 5391 pipe_config->csc_mode, pipe_config->gamma_mode, 5392 pipe_config->gamma_enable, pipe_config->csc_enable); 5393 5394 drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n", 5395 pipe_config->hw.degamma_lut ? 5396 drm_color_lut_size(pipe_config->hw.degamma_lut) : 0, 5397 pipe_config->hw.gamma_lut ? 5398 drm_color_lut_size(pipe_config->hw.gamma_lut) : 0); 5399 5400 dump_planes: 5401 if (!state) 5402 return; 5403 5404 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 5405 if (plane->pipe == crtc->pipe) 5406 intel_dump_plane_state(plane_state); 5407 } 5408 } 5409 5410 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 5411 { 5412 struct drm_device *dev = state->base.dev; 5413 struct drm_connector *connector; 5414 struct drm_connector_list_iter conn_iter; 5415 unsigned int used_ports = 0; 5416 unsigned int used_mst_ports = 0; 5417 bool ret = true; 5418 5419 /* 5420 * We're going to peek into connector->state, 5421 * hence connection_mutex must be held. 5422 */ 5423 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 5424 5425 /* 5426 * Walk the connector list instead of the encoder 5427 * list to detect the problem on ddi platforms 5428 * where there's just one encoder per digital port. 
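 * (On those platforms a single encoder can back e.g. both the DP and
 * HDMI connectors of one port, so only a connector walk sees every
 * potential user of that port.)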
5429 */ 5430 drm_connector_list_iter_begin(dev, &conn_iter); 5431 drm_for_each_connector_iter(connector, &conn_iter) { 5432 struct drm_connector_state *connector_state; 5433 struct intel_encoder *encoder; 5434 5435 connector_state = 5436 drm_atomic_get_new_connector_state(&state->base, 5437 connector); 5438 if (!connector_state) 5439 connector_state = connector->state; 5440 5441 if (!connector_state->best_encoder) 5442 continue; 5443 5444 encoder = to_intel_encoder(connector_state->best_encoder); 5445 5446 drm_WARN_ON(dev, !connector_state->crtc); 5447 5448 switch (encoder->type) { 5449 case INTEL_OUTPUT_DDI: 5450 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) 5451 break; 5452 fallthrough; 5453 case INTEL_OUTPUT_DP: 5454 case INTEL_OUTPUT_HDMI: 5455 case INTEL_OUTPUT_EDP: 5456 /* the same port mustn't appear more than once */ 5457 if (used_ports & BIT(encoder->port)) 5458 ret = false; 5459 5460 used_ports |= BIT(encoder->port); 5461 break; 5462 case INTEL_OUTPUT_DP_MST: 5463 used_mst_ports |= 5464 1 << encoder->port; 5465 break; 5466 default: 5467 break; 5468 } 5469 } 5470 drm_connector_list_iter_end(&conn_iter); 5471 5472 /* can't mix MST and SST/HDMI on the same port */ 5473 if (used_ports & used_mst_ports) 5474 return false; 5475 5476 return ret; 5477 } 5478 5479 static void 5480 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, 5481 struct intel_crtc *crtc) 5482 { 5483 struct intel_crtc_state *crtc_state = 5484 intel_atomic_get_new_crtc_state(state, crtc); 5485 5486 WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); 5487 5488 drm_property_replace_blob(&crtc_state->hw.degamma_lut, 5489 crtc_state->uapi.degamma_lut); 5490 drm_property_replace_blob(&crtc_state->hw.gamma_lut, 5491 crtc_state->uapi.gamma_lut); 5492 drm_property_replace_blob(&crtc_state->hw.ctm, 5493 crtc_state->uapi.ctm); 5494 } 5495 5496 static void 5497 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state, 5498 struct intel_crtc *crtc) 5499 { 5500 struct intel_crtc_state *crtc_state = 5501 intel_atomic_get_new_crtc_state(state, crtc); 5502 5503 WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); 5504 5505 crtc_state->hw.enable = crtc_state->uapi.enable; 5506 crtc_state->hw.active = crtc_state->uapi.active; 5507 crtc_state->hw.mode = crtc_state->uapi.mode; 5508 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode; 5509 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; 5510 5511 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 5512 } 5513 5514 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state) 5515 { 5516 if (intel_crtc_is_bigjoiner_slave(crtc_state)) 5517 return; 5518 5519 crtc_state->uapi.enable = crtc_state->hw.enable; 5520 crtc_state->uapi.active = crtc_state->hw.active; 5521 drm_WARN_ON(crtc_state->uapi.crtc->dev, 5522 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0); 5523 5524 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode; 5525 crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter; 5526 5527 drm_property_replace_blob(&crtc_state->uapi.degamma_lut, 5528 crtc_state->hw.degamma_lut); 5529 drm_property_replace_blob(&crtc_state->uapi.gamma_lut, 5530 crtc_state->hw.gamma_lut); 5531 drm_property_replace_blob(&crtc_state->uapi.ctm, 5532 crtc_state->hw.ctm); 5533 } 5534 5535 static void 5536 copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state, 5537 struct intel_crtc *slave_crtc) 5538 { 5539 struct intel_crtc_state *slave_crtc_state = 5540 
intel_atomic_get_new_crtc_state(state, slave_crtc); 5541 struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state); 5542 const struct intel_crtc_state *master_crtc_state = 5543 intel_atomic_get_new_crtc_state(state, master_crtc); 5544 5545 drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut, 5546 master_crtc_state->hw.degamma_lut); 5547 drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut, 5548 master_crtc_state->hw.gamma_lut); 5549 drm_property_replace_blob(&slave_crtc_state->hw.ctm, 5550 master_crtc_state->hw.ctm); 5551 5552 slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed; 5553 } 5554 5555 static int 5556 copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state, 5557 struct intel_crtc *slave_crtc) 5558 { 5559 struct intel_crtc_state *slave_crtc_state = 5560 intel_atomic_get_new_crtc_state(state, slave_crtc); 5561 struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state); 5562 const struct intel_crtc_state *master_crtc_state = 5563 intel_atomic_get_new_crtc_state(state, master_crtc); 5564 struct intel_crtc_state *saved_state; 5565 5566 saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL); 5567 if (!saved_state) 5568 return -ENOMEM; 5569 5570 /* preserve some things from the slave's original crtc state */ 5571 saved_state->uapi = slave_crtc_state->uapi; 5572 saved_state->scaler_state = slave_crtc_state->scaler_state; 5573 saved_state->shared_dpll = slave_crtc_state->shared_dpll; 5574 saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state; 5575 saved_state->crc_enabled = slave_crtc_state->crc_enabled; 5576 5577 intel_crtc_free_hw_state(slave_crtc_state); 5578 memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state)); 5579 kfree(saved_state); 5580 5581 /* Re-init hw state */ 5582 memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw)); 5583 slave_crtc_state->hw.enable = master_crtc_state->hw.enable; 5584 slave_crtc_state->hw.active = master_crtc_state->hw.active; 5585 slave_crtc_state->hw.mode = master_crtc_state->hw.mode; 5586 slave_crtc_state->hw.pipe_mode = master_crtc_state->hw.pipe_mode; 5587 slave_crtc_state->hw.adjusted_mode = master_crtc_state->hw.adjusted_mode; 5588 slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter; 5589 5590 copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc); 5591 5592 /* Some fixups */ 5593 slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed; 5594 slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed; 5595 slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed; 5596 slave_crtc_state->cpu_transcoder = master_crtc_state->cpu_transcoder; 5597 slave_crtc_state->has_audio = master_crtc_state->has_audio; 5598 5599 return 0; 5600 } 5601 5602 static int 5603 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, 5604 struct intel_crtc *crtc) 5605 { 5606 struct intel_crtc_state *crtc_state = 5607 intel_atomic_get_new_crtc_state(state, crtc); 5608 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5609 struct intel_crtc_state *saved_state; 5610 5611 saved_state = intel_crtc_state_alloc(crtc); 5612 if (!saved_state) 5613 return -ENOMEM; 5614 5615 /* free the old crtc_state->hw members */ 5616 intel_crtc_free_hw_state(crtc_state); 5617 5618 /* FIXME: before the switch to atomic started, a new pipe_config was 5619 * kzalloc'd. 
Code that depends on any field being zero should be
5620 * fixed, so that the crtc_state can be safely duplicated. For now,
5621 * only fields that are known to not cause problems are preserved. */
5622
5623 saved_state->uapi = crtc_state->uapi;
5624 saved_state->scaler_state = crtc_state->scaler_state;
5625 saved_state->shared_dpll = crtc_state->shared_dpll;
5626 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
5627 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
5628 sizeof(saved_state->icl_port_dplls));
5629 saved_state->crc_enabled = crtc_state->crc_enabled;
5630 if (IS_G4X(dev_priv) ||
5631 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5632 saved_state->wm = crtc_state->wm;
5633
5634 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
5635 kfree(saved_state);
5636
5637 intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);
5638
5639 return 0;
5640 }
5641
5642 static int
5643 intel_modeset_pipe_config(struct intel_atomic_state *state,
5644 struct intel_crtc_state *pipe_config)
5645 {
5646 struct drm_crtc *crtc = pipe_config->uapi.crtc;
5647 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5648 struct drm_connector *connector;
5649 struct drm_connector_state *connector_state;
5650 int base_bpp, ret, i;
5651 bool retry = true;
5652
5653 pipe_config->cpu_transcoder =
5654 (enum transcoder) to_intel_crtc(crtc)->pipe;
5655
5656 /*
5657 * Sanitize sync polarity flags based on requested ones. If neither
5658 * positive nor negative polarity is requested, treat this as meaning
5659 * negative polarity.
5660 */
5661 if (!(pipe_config->hw.adjusted_mode.flags &
5662 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
5663 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
5664
5665 if (!(pipe_config->hw.adjusted_mode.flags &
5666 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
5667 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
5668
5669 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
5670 pipe_config);
5671 if (ret)
5672 return ret;
5673
5674 base_bpp = pipe_config->pipe_bpp;
5675
5676 /*
5677 * Determine the real pipe dimensions. Note that stereo modes can
5678 * increase the actual pipe size due to the frame doubling and
5679 * insertion of additional space for blanks between the frames. This
5680 * is stored in the crtc timings. We use the requested mode to do this
5681 * computation to clearly distinguish it from the adjusted mode, which
5682 * can be changed by the connectors in the retry loop below.
5683 */
5684 drm_mode_get_hv_timing(&pipe_config->hw.mode,
5685 &pipe_config->pipe_src_w,
5686 &pipe_config->pipe_src_h);
5687
5688 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5689 struct intel_encoder *encoder =
5690 to_intel_encoder(connector_state->best_encoder);
5691
5692 if (connector_state->crtc != crtc)
5693 continue;
5694
5695 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
5696 drm_dbg_kms(&i915->drm,
5697 "rejecting invalid cloning configuration\n");
5698 return -EINVAL;
5699 }
5700
5701 /*
5702 * Determine output_types before calling the .compute_config()
5703 * hooks so that the hooks can use this information safely.
5704 */
5705 if (encoder->compute_output_type)
5706 pipe_config->output_types |=
5707 BIT(encoder->compute_output_type(encoder, pipe_config,
5708 connector_state));
5709 else
5710 pipe_config->output_types |= BIT(encoder->type);
5711 }
5712
5713 encoder_retry:
5714 /* Ensure the port clock defaults are reset when retrying.
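 * Each encoder's .compute_config() may overwrite these, and if the
 * CRTC-level computation below then returns -EAGAIN, the whole encoder
 * pass is redone exactly once from this label.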
*/
5715 pipe_config->port_clock = 0;
5716 pipe_config->pixel_multiplier = 1;
5717
5718 /* Fill in default crtc timings, allow encoders to overwrite them. */
5719 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
5720 CRTC_STEREO_DOUBLE);
5721
5722 /* Pass our mode to the connectors and the CRTC to give them a chance to
5723 * adjust it according to limitations or connector properties, and also
5724 * a chance to reject the mode entirely.
5725 */
5726 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5727 struct intel_encoder *encoder =
5728 to_intel_encoder(connector_state->best_encoder);
5729
5730 if (connector_state->crtc != crtc)
5731 continue;
5732
5733 ret = encoder->compute_config(encoder, pipe_config,
5734 connector_state);
5735 if (ret == -EDEADLK)
5736 return ret;
5737 if (ret < 0) {
5738 drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
5739 return ret;
5740 }
5741 }
5742
5743 /* Set default port clock if not overwritten by the encoder. Needs to be
5744 * done afterwards in case the encoder adjusts the mode. */
5745 if (!pipe_config->port_clock)
5746 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
5747 * pipe_config->pixel_multiplier;
5748
5749 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
5750 if (ret == -EDEADLK)
5751 return ret;
5752 if (ret == -EAGAIN) {
5753 if (drm_WARN(&i915->drm, !retry,
5754 "loop in pipe configuration computation\n"))
5755 return -EINVAL;
5756
5757 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
5758 retry = false;
5759 goto encoder_retry;
5760 }
5761 if (ret < 0) {
5762 drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
5763 return ret;
5764 }
5765
5766 /* Dithering seems to not pass bits through correctly when it should, so
5767 * only enable it on 6bpc panels and when it's not a compliance
5768 * test requesting a 6bpc video pattern.
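 * (i.e. dither only when pipe_bpp == 6*3 below, and only as long as a
 * DP compliance test hasn't set dither_force_disable)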
5769 */ 5770 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) && 5771 !pipe_config->dither_force_disable; 5772 drm_dbg_kms(&i915->drm, 5773 "hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 5774 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 5775 5776 return 0; 5777 } 5778 5779 static int 5780 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state) 5781 { 5782 struct intel_atomic_state *state = 5783 to_intel_atomic_state(crtc_state->uapi.state); 5784 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5785 struct drm_connector_state *conn_state; 5786 struct drm_connector *connector; 5787 int i; 5788 5789 for_each_new_connector_in_state(&state->base, connector, 5790 conn_state, i) { 5791 struct intel_encoder *encoder = 5792 to_intel_encoder(conn_state->best_encoder); 5793 int ret; 5794 5795 if (conn_state->crtc != &crtc->base || 5796 !encoder->compute_config_late) 5797 continue; 5798 5799 ret = encoder->compute_config_late(encoder, crtc_state, 5800 conn_state); 5801 if (ret) 5802 return ret; 5803 } 5804 5805 return 0; 5806 } 5807 5808 bool intel_fuzzy_clock_check(int clock1, int clock2) 5809 { 5810 int diff; 5811 5812 if (clock1 == clock2) 5813 return true; 5814 5815 if (!clock1 || !clock2) 5816 return false; 5817 5818 diff = abs(clock1 - clock2); 5819 5820 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 5821 return true; 5822 5823 return false; 5824 } 5825 5826 static bool 5827 intel_compare_m_n(unsigned int m, unsigned int n, 5828 unsigned int m2, unsigned int n2, 5829 bool exact) 5830 { 5831 if (m == m2 && n == n2) 5832 return true; 5833 5834 if (exact || !m || !n || !m2 || !n2) 5835 return false; 5836 5837 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 5838 5839 if (n > n2) { 5840 while (n > n2) { 5841 m2 <<= 1; 5842 n2 <<= 1; 5843 } 5844 } else if (n < n2) { 5845 while (n < n2) { 5846 m <<= 1; 5847 n <<= 1; 5848 } 5849 } 5850 5851 if (n != n2) 5852 return false; 5853 5854 return intel_fuzzy_clock_check(m, m2); 5855 } 5856 5857 static bool 5858 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 5859 const struct intel_link_m_n *m2_n2, 5860 bool exact) 5861 { 5862 return m_n->tu == m2_n2->tu && 5863 intel_compare_m_n(m_n->data_m, m_n->data_n, 5864 m2_n2->data_m, m2_n2->data_n, exact) && 5865 intel_compare_m_n(m_n->link_m, m_n->link_n, 5866 m2_n2->link_m, m2_n2->link_n, exact); 5867 } 5868 5869 static bool 5870 intel_compare_infoframe(const union hdmi_infoframe *a, 5871 const union hdmi_infoframe *b) 5872 { 5873 return memcmp(a, b, sizeof(*a)) == 0; 5874 } 5875 5876 static bool 5877 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, 5878 const struct drm_dp_vsc_sdp *b) 5879 { 5880 return memcmp(a, b, sizeof(*a)) == 0; 5881 } 5882 5883 static void 5884 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, 5885 bool fastset, const char *name, 5886 const union hdmi_infoframe *a, 5887 const union hdmi_infoframe *b) 5888 { 5889 if (fastset) { 5890 if (!drm_debug_enabled(DRM_UT_KMS)) 5891 return; 5892 5893 drm_dbg_kms(&dev_priv->drm, 5894 "fastset mismatch in %s infoframe\n", name); 5895 drm_dbg_kms(&dev_priv->drm, "expected:\n"); 5896 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); 5897 drm_dbg_kms(&dev_priv->drm, "found:\n"); 5898 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); 5899 } else { 5900 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name); 5901 drm_err(&dev_priv->drm, "expected:\n"); 5902 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); 5903 drm_err(&dev_priv->drm, "found:\n"); 5904 
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); 5905 } 5906 } 5907 5908 static void 5909 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv, 5910 bool fastset, const char *name, 5911 const struct drm_dp_vsc_sdp *a, 5912 const struct drm_dp_vsc_sdp *b) 5913 { 5914 if (fastset) { 5915 if (!drm_debug_enabled(DRM_UT_KMS)) 5916 return; 5917 5918 drm_dbg_kms(&dev_priv->drm, 5919 "fastset mismatch in %s dp sdp\n", name); 5920 drm_dbg_kms(&dev_priv->drm, "expected:\n"); 5921 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a); 5922 drm_dbg_kms(&dev_priv->drm, "found:\n"); 5923 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b); 5924 } else { 5925 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name); 5926 drm_err(&dev_priv->drm, "expected:\n"); 5927 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a); 5928 drm_err(&dev_priv->drm, "found:\n"); 5929 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b); 5930 } 5931 } 5932 5933 static void __printf(4, 5) 5934 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, 5935 const char *name, const char *format, ...) 5936 { 5937 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 5938 struct va_format vaf; 5939 va_list args; 5940 5941 va_start(args, format); 5942 vaf.fmt = format; 5943 vaf.va = &args; 5944 5945 if (fastset) 5946 drm_dbg_kms(&i915->drm, 5947 "[CRTC:%d:%s] fastset mismatch in %s %pV\n", 5948 crtc->base.base.id, crtc->base.name, name, &vaf); 5949 else 5950 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n", 5951 crtc->base.base.id, crtc->base.name, name, &vaf); 5952 5953 va_end(args); 5954 } 5955 5956 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 5957 { 5958 if (dev_priv->params.fastboot != -1) 5959 return dev_priv->params.fastboot; 5960 5961 /* Enable fastboot by default on Skylake and newer */ 5962 if (DISPLAY_VER(dev_priv) >= 9) 5963 return true; 5964 5965 /* Enable fastboot by default on VLV and CHV */ 5966 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5967 return true; 5968 5969 /* Disabled by default on all others */ 5970 return false; 5971 } 5972 5973 static bool 5974 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 5975 const struct intel_crtc_state *pipe_config, 5976 bool fastset) 5977 { 5978 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); 5979 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 5980 bool ret = true; 5981 u32 bp_gamma = 0; 5982 bool fixup_inherited = fastset && 5983 current_config->inherited && !pipe_config->inherited; 5984 5985 if (fixup_inherited && !fastboot_enabled(dev_priv)) { 5986 drm_dbg_kms(&dev_priv->drm, 5987 "initial modeset and fastboot not set\n"); 5988 ret = false; 5989 } 5990 5991 #define PIPE_CONF_CHECK_X(name) do { \ 5992 if (current_config->name != pipe_config->name) { \ 5993 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5994 "(expected 0x%08x, found 0x%08x)", \ 5995 current_config->name, \ 5996 pipe_config->name); \ 5997 ret = false; \ 5998 } \ 5999 } while (0) 6000 6001 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \ 6002 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \ 6003 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 6004 "(expected 0x%08x, found 0x%08x)", \ 6005 current_config->name & (mask), \ 6006 pipe_config->name & (mask)); \ 6007 ret = false; \ 6008 } \ 6009 } while (0) 6010 6011 #define PIPE_CONF_CHECK_I(name) do { \ 6012 if (current_config->name != pipe_config->name) { \ 6013 
pipe_config_mismatch(fastset, crtc, __stringify(name), \
6014 "(expected %i, found %i)", \
6015 current_config->name, \
6016 pipe_config->name); \
6017 ret = false; \
6018 } \
6019 } while (0)
6020
6021 #define PIPE_CONF_CHECK_BOOL(name) do { \
6022 if (current_config->name != pipe_config->name) { \
6023 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6024 "(expected %s, found %s)", \
6025 yesno(current_config->name), \
6026 yesno(pipe_config->name)); \
6027 ret = false; \
6028 } \
6029 } while (0)
6030
6031 /*
6032 * Checks state where we only read out the enabling, but not the entire
6033 * state itself (like full infoframes or ELD for audio). These states
6034 * require a full modeset on bootup to fix up.
6035 */
6036 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
6037 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
6038 PIPE_CONF_CHECK_BOOL(name); \
6039 } else { \
6040 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6041 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
6042 yesno(current_config->name), \
6043 yesno(pipe_config->name)); \
6044 ret = false; \
6045 } \
6046 } while (0)
6047
6048 #define PIPE_CONF_CHECK_P(name) do { \
6049 if (current_config->name != pipe_config->name) { \
6050 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6051 "(expected %p, found %p)", \
6052 current_config->name, \
6053 pipe_config->name); \
6054 ret = false; \
6055 } \
6056 } while (0)
6057
6058 #define PIPE_CONF_CHECK_M_N(name) do { \
6059 if (!intel_compare_link_m_n(&current_config->name, \
6060 &pipe_config->name, \
6061 !fastset)) { \
6062 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6063 "(expected tu %i data %i/%i link %i/%i, " \
6064 "found tu %i, data %i/%i link %i/%i)", \
6065 current_config->name.tu, \
6066 current_config->name.data_m, \
6067 current_config->name.data_n, \
6068 current_config->name.link_m, \
6069 current_config->name.link_n, \
6070 pipe_config->name.tu, \
6071 pipe_config->name.data_m, \
6072 pipe_config->name.data_n, \
6073 pipe_config->name.link_m, \
6074 pipe_config->name.link_n); \
6075 ret = false; \
6076 } \
6077 } while (0)
6078
6079 /* This is required for BDW+ where there is only one set of registers for
6080 * switching between high and low RR.
6081 * This macro can be used whenever a comparison has to be made between one
6082 * hw state and multiple sw state variables.
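 * For instance, PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2) below accepts
 * the hw readout if it matches either the sw dp_m_n or the sw dp_m2_n2
 * values.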
6083 */
6084 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
6085 if (!intel_compare_link_m_n(&current_config->name, \
6086 &pipe_config->name, !fastset) && \
6087 !intel_compare_link_m_n(&current_config->alt_name, \
6088 &pipe_config->name, !fastset)) { \
6089 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6090 "(expected tu %i data %i/%i link %i/%i, " \
6091 "or tu %i data %i/%i link %i/%i, " \
6092 "found tu %i, data %i/%i link %i/%i)", \
6093 current_config->name.tu, \
6094 current_config->name.data_m, \
6095 current_config->name.data_n, \
6096 current_config->name.link_m, \
6097 current_config->name.link_n, \
6098 current_config->alt_name.tu, \
6099 current_config->alt_name.data_m, \
6100 current_config->alt_name.data_n, \
6101 current_config->alt_name.link_m, \
6102 current_config->alt_name.link_n, \
6103 pipe_config->name.tu, \
6104 pipe_config->name.data_m, \
6105 pipe_config->name.data_n, \
6106 pipe_config->name.link_m, \
6107 pipe_config->name.link_n); \
6108 ret = false; \
6109 } \
6110 } while (0)
6111
6112 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
6113 if ((current_config->name ^ pipe_config->name) & (mask)) { \
6114 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6115 "(%x) (expected %i, found %i)", \
6116 (mask), \
6117 current_config->name & (mask), \
6118 pipe_config->name & (mask)); \
6119 ret = false; \
6120 } \
6121 } while (0)
6122
6123 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
6124 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
6125 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6126 "(expected %i, found %i)", \
6127 current_config->name, \
6128 pipe_config->name); \
6129 ret = false; \
6130 } \
6131 } while (0)
6132
6133 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
6134 if (!intel_compare_infoframe(&current_config->infoframes.name, \
6135 &pipe_config->infoframes.name)) { \
6136 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
6137 &current_config->infoframes.name, \
6138 &pipe_config->infoframes.name); \
6139 ret = false; \
6140 } \
6141 } while (0)
6142
6143 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
6144 if (!current_config->has_psr && !pipe_config->has_psr && \
6145 !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
6146 &pipe_config->infoframes.name)) { \
6147 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
6148 &current_config->infoframes.name, \
6149 &pipe_config->infoframes.name); \
6150 ret = false; \
6151 } \
6152 } while (0)
6153
6154 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
6155 if (current_config->name1 != pipe_config->name1) { \
6156 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
6157 "(expected %i, found %i, won't compare lut values)", \
6158 current_config->name1, \
6159 pipe_config->name1); \
6160 ret = false; \
6161 } else { \
6162 if (!intel_color_lut_equal(current_config->name2, \
6163 pipe_config->name2, pipe_config->name1, \
6164 bit_precision)) { \
6165 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
6166 "hw_state doesn't match sw_state"); \
6167 ret = false; \
6168 } \
6169 } \
6170 } while (0)
6171
6172 #define PIPE_CONF_QUIRK(quirk) \
6173 ((current_config->quirks | pipe_config->quirks) & (quirk))
6174
6175 PIPE_CONF_CHECK_I(cpu_transcoder);
6176
6177 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
6178 PIPE_CONF_CHECK_I(fdi_lanes);
6179 PIPE_CONF_CHECK_M_N(fdi_m_n);
6180
6181 PIPE_CONF_CHECK_I(lane_count);
6182 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
6183
6184 if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv)) { 6185 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 6186 } else { 6187 PIPE_CONF_CHECK_M_N(dp_m_n); 6188 PIPE_CONF_CHECK_M_N(dp_m2_n2); 6189 } 6190 6191 PIPE_CONF_CHECK_X(output_types); 6192 6193 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay); 6194 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal); 6195 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start); 6196 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end); 6197 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start); 6198 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end); 6199 6200 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay); 6201 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal); 6202 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start); 6203 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end); 6204 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start); 6205 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end); 6206 6207 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); 6208 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); 6209 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); 6210 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); 6211 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); 6212 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); 6213 6214 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); 6215 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); 6216 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); 6217 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); 6218 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); 6219 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); 6220 6221 PIPE_CONF_CHECK_I(pixel_multiplier); 6222 6223 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 6224 DRM_MODE_FLAG_INTERLACE); 6225 6226 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 6227 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 6228 DRM_MODE_FLAG_PHSYNC); 6229 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 6230 DRM_MODE_FLAG_NHSYNC); 6231 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 6232 DRM_MODE_FLAG_PVSYNC); 6233 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 6234 DRM_MODE_FLAG_NVSYNC); 6235 } 6236 6237 PIPE_CONF_CHECK_I(output_format); 6238 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 6239 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 6240 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6241 PIPE_CONF_CHECK_BOOL(limited_color_range); 6242 6243 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 6244 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 6245 PIPE_CONF_CHECK_BOOL(has_infoframe); 6246 PIPE_CONF_CHECK_BOOL(fec_enable); 6247 6248 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 6249 6250 PIPE_CONF_CHECK_X(gmch_pfit.control); 6251 /* pfit ratios are autocomputed by the hw on gen4+ */ 6252 if (DISPLAY_VER(dev_priv) < 4) 6253 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 6254 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 6255 6256 /* 6257 * Changing the EDP transcoder input mux 6258 * (A_ONOFF vs. A_ON) requires a full modeset. 
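 * That is why pch_pfit.force_thru is checked right below even when a
 * fastset would otherwise be allowed.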
6259 */ 6260 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 6261 6262 if (!fastset) { 6263 PIPE_CONF_CHECK_I(pipe_src_w); 6264 PIPE_CONF_CHECK_I(pipe_src_h); 6265 6266 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 6267 if (current_config->pch_pfit.enabled) { 6268 PIPE_CONF_CHECK_I(pch_pfit.dst.x1); 6269 PIPE_CONF_CHECK_I(pch_pfit.dst.y1); 6270 PIPE_CONF_CHECK_I(pch_pfit.dst.x2); 6271 PIPE_CONF_CHECK_I(pch_pfit.dst.y2); 6272 } 6273 6274 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 6275 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 6276 6277 PIPE_CONF_CHECK_X(gamma_mode); 6278 if (IS_CHERRYVIEW(dev_priv)) 6279 PIPE_CONF_CHECK_X(cgm_mode); 6280 else 6281 PIPE_CONF_CHECK_X(csc_mode); 6282 PIPE_CONF_CHECK_BOOL(gamma_enable); 6283 PIPE_CONF_CHECK_BOOL(csc_enable); 6284 6285 PIPE_CONF_CHECK_I(linetime); 6286 PIPE_CONF_CHECK_I(ips_linetime); 6287 6288 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); 6289 if (bp_gamma) 6290 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma); 6291 6292 if (current_config->active_planes) { 6293 PIPE_CONF_CHECK_BOOL(has_psr); 6294 PIPE_CONF_CHECK_BOOL(has_psr2); 6295 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch); 6296 PIPE_CONF_CHECK_I(dc3co_exitline); 6297 } 6298 } 6299 6300 PIPE_CONF_CHECK_BOOL(double_wide); 6301 6302 if (dev_priv->dpll.mgr) { 6303 PIPE_CONF_CHECK_P(shared_dpll); 6304 6305 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 6306 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 6307 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 6308 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 6309 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 6310 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 6311 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 6312 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 6313 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 6314 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 6315 PIPE_CONF_CHECK_X(dpll_hw_state.div0); 6316 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 6317 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 6318 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 6319 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 6320 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 6321 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 6322 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 6323 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 6324 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 6325 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 6326 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 6327 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 6328 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 6329 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 6330 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 6331 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 6332 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 6333 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 6334 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 6335 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 6336 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 6337 } 6338 6339 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 6340 PIPE_CONF_CHECK_X(dsi_pll.div); 6341 6342 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) 6343 PIPE_CONF_CHECK_I(pipe_bpp); 6344 6345 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); 6346 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); 6347 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 6348 6349 PIPE_CONF_CHECK_I(min_voltage_level); 6350 6351 if (current_config->has_psr || pipe_config->has_psr) 6352 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, 6353 ~intel_hdmi_infoframe_enable(DP_SDP_VSC)); 6354 else 6355 PIPE_CONF_CHECK_X(infoframes.enable); 6356 6357 PIPE_CONF_CHECK_X(infoframes.gcp); 6358 
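/*
 * The PIPE_CONF_CHECK_INFOFRAME() uses below compare the complete packed
 * payload with a memcmp(); e.g. PIPE_CONF_CHECK_INFOFRAME(avi) expands
 * roughly to:
 *
 *	if (!intel_compare_infoframe(&current_config->infoframes.avi,
 *				     &pipe_config->infoframes.avi)) {
 *		pipe_config_infoframe_mismatch(dev_priv, fastset, "avi",
 *					       &current_config->infoframes.avi,
 *					       &pipe_config->infoframes.avi);
 *		ret = false;
 *	}
 *
 * The VSC SDP check is skipped whenever either state has PSR enabled
 * (see PIPE_CONF_CHECK_DP_VSC_SDP above).
 */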
PIPE_CONF_CHECK_INFOFRAME(avi); 6359 PIPE_CONF_CHECK_INFOFRAME(spd); 6360 PIPE_CONF_CHECK_INFOFRAME(hdmi); 6361 PIPE_CONF_CHECK_INFOFRAME(drm); 6362 PIPE_CONF_CHECK_DP_VSC_SDP(vsc); 6363 6364 PIPE_CONF_CHECK_X(sync_mode_slaves_mask); 6365 PIPE_CONF_CHECK_I(master_transcoder); 6366 PIPE_CONF_CHECK_BOOL(bigjoiner); 6367 PIPE_CONF_CHECK_X(bigjoiner_pipes); 6368 6369 PIPE_CONF_CHECK_I(dsc.compression_enable); 6370 PIPE_CONF_CHECK_I(dsc.dsc_split); 6371 PIPE_CONF_CHECK_I(dsc.compressed_bpp); 6372 6373 PIPE_CONF_CHECK_BOOL(splitter.enable); 6374 PIPE_CONF_CHECK_I(splitter.link_count); 6375 PIPE_CONF_CHECK_I(splitter.pixel_overlap); 6376 6377 PIPE_CONF_CHECK_I(mst_master_transcoder); 6378 6379 PIPE_CONF_CHECK_BOOL(vrr.enable); 6380 PIPE_CONF_CHECK_I(vrr.vmin); 6381 PIPE_CONF_CHECK_I(vrr.vmax); 6382 PIPE_CONF_CHECK_I(vrr.flipline); 6383 PIPE_CONF_CHECK_I(vrr.pipeline_full); 6384 PIPE_CONF_CHECK_I(vrr.guardband); 6385 6386 #undef PIPE_CONF_CHECK_X 6387 #undef PIPE_CONF_CHECK_I 6388 #undef PIPE_CONF_CHECK_BOOL 6389 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 6390 #undef PIPE_CONF_CHECK_P 6391 #undef PIPE_CONF_CHECK_FLAGS 6392 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 6393 #undef PIPE_CONF_CHECK_COLOR_LUT 6394 #undef PIPE_CONF_QUIRK 6395 6396 return ret; 6397 } 6398 6399 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 6400 const struct intel_crtc_state *pipe_config) 6401 { 6402 if (pipe_config->has_pch_encoder) { 6403 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 6404 &pipe_config->fdi_m_n); 6405 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock; 6406 6407 /* 6408 * FDI already provided one idea for the dotclock. 6409 * Yell if the encoder disagrees. 6410 */ 6411 drm_WARN(&dev_priv->drm, 6412 !intel_fuzzy_clock_check(fdi_dotclock, dotclock), 6413 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 6414 fdi_dotclock, dotclock); 6415 } 6416 } 6417 6418 static void verify_wm_state(struct intel_crtc *crtc, 6419 struct intel_crtc_state *new_crtc_state) 6420 { 6421 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6422 struct skl_hw_state { 6423 struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; 6424 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; 6425 struct skl_pipe_wm wm; 6426 } *hw; 6427 const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal; 6428 int level, max_level = ilk_wm_max_level(dev_priv); 6429 struct intel_plane *plane; 6430 u8 hw_enabled_slices; 6431 6432 if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active) 6433 return; 6434 6435 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 6436 if (!hw) 6437 return; 6438 6439 skl_pipe_wm_get_hw_state(crtc, &hw->wm); 6440 6441 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); 6442 6443 hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv); 6444 6445 if (DISPLAY_VER(dev_priv) >= 11 && 6446 hw_enabled_slices != dev_priv->dbuf.enabled_slices) 6447 drm_err(&dev_priv->drm, 6448 "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", 6449 dev_priv->dbuf.enabled_slices, 6450 hw_enabled_slices); 6451 6452 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 6453 const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 6454 const struct skl_wm_level *hw_wm_level, *sw_wm_level; 6455 6456 /* Watermarks */ 6457 for (level = 0; level <= max_level; level++) { 6458 hw_wm_level = &hw->wm.planes[plane->id].wm[level]; 6459 sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); 6460 6461 if (skl_wm_level_equals(hw_wm_level, sw_wm_level)) 6462 continue; 6463 
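/* sw values are printed as "expected", the hw readout as "got" */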
6464 drm_err(&dev_priv->drm, 6465 "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 6466 plane->base.base.id, plane->base.name, level, 6467 sw_wm_level->enable, 6468 sw_wm_level->blocks, 6469 sw_wm_level->lines, 6470 hw_wm_level->enable, 6471 hw_wm_level->blocks, 6472 hw_wm_level->lines); 6473 } 6474 6475 hw_wm_level = &hw->wm.planes[plane->id].trans_wm; 6476 sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id); 6477 6478 if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) { 6479 drm_err(&dev_priv->drm, 6480 "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 6481 plane->base.base.id, plane->base.name, 6482 sw_wm_level->enable, 6483 sw_wm_level->blocks, 6484 sw_wm_level->lines, 6485 hw_wm_level->enable, 6486 hw_wm_level->blocks, 6487 hw_wm_level->lines); 6488 } 6489 6490 hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0; 6491 sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0; 6492 6493 if (HAS_HW_SAGV_WM(dev_priv) && 6494 !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { 6495 drm_err(&dev_priv->drm, 6496 "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 6497 plane->base.base.id, plane->base.name, 6498 sw_wm_level->enable, 6499 sw_wm_level->blocks, 6500 sw_wm_level->lines, 6501 hw_wm_level->enable, 6502 hw_wm_level->blocks, 6503 hw_wm_level->lines); 6504 } 6505 6506 hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm; 6507 sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm; 6508 6509 if (HAS_HW_SAGV_WM(dev_priv) && 6510 !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { 6511 drm_err(&dev_priv->drm, 6512 "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 6513 plane->base.base.id, plane->base.name, 6514 sw_wm_level->enable, 6515 sw_wm_level->blocks, 6516 sw_wm_level->lines, 6517 hw_wm_level->enable, 6518 hw_wm_level->blocks, 6519 hw_wm_level->lines); 6520 } 6521 6522 /* DDB */ 6523 hw_ddb_entry = &hw->ddb_y[plane->id]; 6524 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id]; 6525 6526 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 6527 drm_err(&dev_priv->drm, 6528 "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n", 6529 plane->base.base.id, plane->base.name, 6530 sw_ddb_entry->start, sw_ddb_entry->end, 6531 hw_ddb_entry->start, hw_ddb_entry->end); 6532 } 6533 } 6534 6535 kfree(hw); 6536 } 6537 6538 static void 6539 verify_connector_state(struct intel_atomic_state *state, 6540 struct intel_crtc *crtc) 6541 { 6542 struct drm_connector *connector; 6543 struct drm_connector_state *new_conn_state; 6544 int i; 6545 6546 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { 6547 struct drm_encoder *encoder = connector->encoder; 6548 struct intel_crtc_state *crtc_state = NULL; 6549 6550 if (new_conn_state->crtc != &crtc->base) 6551 continue; 6552 6553 if (crtc) 6554 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6555 6556 intel_connector_verify_state(crtc_state, new_conn_state); 6557 6558 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 6559 "connector's atomic encoder doesn't match legacy encoder\n"); 6560 } 6561 } 6562 6563 static void 6564 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) 6565 { 6566 struct intel_encoder *encoder; 6567 struct drm_connector *connector; 6568 struct drm_connector_state *old_conn_state, *new_conn_state; 6569 int i; 6570 6571 for_each_intel_encoder(&dev_priv->drm, encoder) { 6572 bool enabled = false, found = 
false; 6573 enum pipe pipe; 6574 6575 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n", 6576 encoder->base.base.id, 6577 encoder->base.name); 6578 6579 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, 6580 new_conn_state, i) { 6581 if (old_conn_state->best_encoder == &encoder->base) 6582 found = true; 6583 6584 if (new_conn_state->best_encoder != &encoder->base) 6585 continue; 6586 found = enabled = true; 6587 6588 I915_STATE_WARN(new_conn_state->crtc != 6589 encoder->base.crtc, 6590 "connector's crtc doesn't match encoder crtc\n"); 6591 } 6592 6593 if (!found) 6594 continue; 6595 6596 I915_STATE_WARN(!!encoder->base.crtc != enabled, 6597 "encoder's enabled state mismatch " 6598 "(expected %i, found %i)\n", 6599 !!encoder->base.crtc, enabled); 6600 6601 if (!encoder->base.crtc) { 6602 bool active; 6603 6604 active = encoder->get_hw_state(encoder, &pipe); 6605 I915_STATE_WARN(active, 6606 "encoder detached but still enabled on pipe %c.\n", 6607 pipe_name(pipe)); 6608 } 6609 } 6610 } 6611 6612 static void 6613 verify_crtc_state(struct intel_crtc *crtc, 6614 struct intel_crtc_state *old_crtc_state, 6615 struct intel_crtc_state *new_crtc_state) 6616 { 6617 struct drm_device *dev = crtc->base.dev; 6618 struct drm_i915_private *dev_priv = to_i915(dev); 6619 struct intel_encoder *encoder; 6620 struct intel_crtc_state *pipe_config = old_crtc_state; 6621 struct drm_atomic_state *state = old_crtc_state->uapi.state; 6622 struct intel_crtc *master_crtc; 6623 6624 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); 6625 intel_crtc_free_hw_state(old_crtc_state); 6626 intel_crtc_state_reset(old_crtc_state, crtc); 6627 old_crtc_state->uapi.state = state; 6628 6629 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, 6630 crtc->base.name); 6631 6632 pipe_config->hw.enable = new_crtc_state->hw.enable; 6633 6634 intel_crtc_get_pipe_config(pipe_config); 6635 6636 /* we keep both pipes enabled on 830 */ 6637 if (IS_I830(dev_priv) && pipe_config->hw.active) 6638 pipe_config->hw.active = new_crtc_state->hw.active; 6639 6640 I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active, 6641 "crtc active state doesn't match with hw state " 6642 "(expected %i, found %i)\n", 6643 new_crtc_state->hw.active, pipe_config->hw.active); 6644 6645 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active, 6646 "transitional active state does not match atomic hw state " 6647 "(expected %i, found %i)\n", 6648 new_crtc_state->hw.active, crtc->active); 6649 6650 master_crtc = intel_master_crtc(new_crtc_state); 6651 6652 for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) { 6653 enum pipe pipe; 6654 bool active; 6655 6656 active = encoder->get_hw_state(encoder, &pipe); 6657 I915_STATE_WARN(active != new_crtc_state->hw.active, 6658 "[ENCODER:%i] active %i with crtc active %i\n", 6659 encoder->base.base.id, active, 6660 new_crtc_state->hw.active); 6661 6662 I915_STATE_WARN(active && master_crtc->pipe != pipe, 6663 "Encoder connected to wrong pipe %c\n", 6664 pipe_name(pipe)); 6665 6666 if (active) 6667 intel_encoder_get_config(encoder, pipe_config); 6668 } 6669 6670 if (!new_crtc_state->hw.active) 6671 return; 6672 6673 intel_pipe_config_sanity_check(dev_priv, pipe_config); 6674 6675 if (!intel_pipe_config_compare(new_crtc_state, 6676 pipe_config, false)) { 6677 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 6678 intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); 6679 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]"); 6680 } 6681 } 6682 6683 static 
void 6684 intel_verify_planes(struct intel_atomic_state *state) 6685 { 6686 struct intel_plane *plane; 6687 const struct intel_plane_state *plane_state; 6688 int i; 6689 6690 for_each_new_intel_plane_in_state(state, plane, 6691 plane_state, i) 6692 assert_plane(plane, plane_state->planar_slave || 6693 plane_state->uapi.visible); 6694 } 6695 6696 static void 6697 verify_single_dpll_state(struct drm_i915_private *dev_priv, 6698 struct intel_shared_dpll *pll, 6699 struct intel_crtc *crtc, 6700 struct intel_crtc_state *new_crtc_state) 6701 { 6702 struct intel_dpll_hw_state dpll_hw_state; 6703 u8 pipe_mask; 6704 bool active; 6705 6706 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 6707 6708 drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name); 6709 6710 active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state); 6711 6712 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { 6713 I915_STATE_WARN(!pll->on && pll->active_mask, 6714 "pll in active use but not on in sw tracking\n"); 6715 I915_STATE_WARN(pll->on && !pll->active_mask, 6716 "pll is on but not used by any active pipe\n"); 6717 I915_STATE_WARN(pll->on != active, 6718 "pll on state mismatch (expected %i, found %i)\n", 6719 pll->on, active); 6720 } 6721 6722 if (!crtc) { 6723 I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask, 6724 "more active pll users than references: 0x%x vs 0x%x\n", 6725 pll->active_mask, pll->state.pipe_mask); 6726 6727 return; 6728 } 6729 6730 pipe_mask = BIT(crtc->pipe); 6731 6732 if (new_crtc_state->hw.active) 6733 I915_STATE_WARN(!(pll->active_mask & pipe_mask), 6734 "pll active mismatch (expected pipe %c in active mask 0x%x)\n", 6735 pipe_name(crtc->pipe), pll->active_mask); 6736 else 6737 I915_STATE_WARN(pll->active_mask & pipe_mask, 6738 "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n", 6739 pipe_name(crtc->pipe), pll->active_mask); 6740 6741 I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask), 6742 "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n", 6743 pipe_mask, pll->state.pipe_mask); 6744 6745 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 6746 &dpll_hw_state, 6747 sizeof(dpll_hw_state)), 6748 "pll hw state mismatch\n"); 6749 } 6750 6751 static void 6752 verify_shared_dpll_state(struct intel_crtc *crtc, 6753 struct intel_crtc_state *old_crtc_state, 6754 struct intel_crtc_state *new_crtc_state) 6755 { 6756 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6757 6758 if (new_crtc_state->shared_dpll) 6759 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state); 6760 6761 if (old_crtc_state->shared_dpll && 6762 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { 6763 u8 pipe_mask = BIT(crtc->pipe); 6764 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; 6765 6766 I915_STATE_WARN(pll->active_mask & pipe_mask, 6767 "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n", 6768 pipe_name(crtc->pipe), pll->active_mask); 6769 I915_STATE_WARN(pll->state.pipe_mask & pipe_mask, 6770 "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n", 6771 pipe_name(crtc->pipe), pll->state.pipe_mask); 6772 } 6773 } 6774 6775 static void 6776 verify_mpllb_state(struct intel_atomic_state *state, 6777 struct intel_crtc_state *new_crtc_state) 6778 { 6779 struct drm_i915_private *i915 = to_i915(state->base.dev); 6780 struct intel_mpllb_state mpllb_hw_state = { 0 }; 6781 struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state; 6782 struct intel_crtc *crtc = 
to_intel_crtc(new_crtc_state->uapi.crtc);
6783 struct intel_encoder *encoder;
6784
6785 if (!IS_DG2(i915))
6786 return;
6787
6788 if (!new_crtc_state->hw.active)
6789 return;
6790
6791 encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
6792 intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
6793
6794 #define MPLLB_CHECK(name) do { \
6795 if (mpllb_sw_state->name != mpllb_hw_state.name) { \
6796 pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
6797 "(expected 0x%08x, found 0x%08x)", \
6798 mpllb_sw_state->name, \
6799 mpllb_hw_state.name); \
6800 } \
6801 } while (0)
6802
6803 MPLLB_CHECK(mpllb_cp);
6804 MPLLB_CHECK(mpllb_div);
6805 MPLLB_CHECK(mpllb_div2);
6806 MPLLB_CHECK(mpllb_fracn1);
6807 MPLLB_CHECK(mpllb_fracn2);
6808 MPLLB_CHECK(mpllb_sscen);
6809 MPLLB_CHECK(mpllb_sscstep);
6810
6811 /*
6812 * ref_control is handled by the hardware/firmware and never
6813 * programmed by the software, but the proper values are supplied
6814 * in the bspec for verification purposes.
6815 */
6816 MPLLB_CHECK(ref_control);
6817
6818 #undef MPLLB_CHECK
6819 }
6820
6821 static void
6822 intel_modeset_verify_crtc(struct intel_crtc *crtc,
6823 struct intel_atomic_state *state,
6824 struct intel_crtc_state *old_crtc_state,
6825 struct intel_crtc_state *new_crtc_state)
6826 {
6827 if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
6828 return;
6829
6830 verify_wm_state(crtc, new_crtc_state);
6831 verify_connector_state(state, crtc);
6832 verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
6833 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
6834 verify_mpllb_state(state, new_crtc_state);
6835 }
6836
6837 static void
6838 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
6839 {
6840 int i;
6841
6842 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
6843 verify_single_dpll_state(dev_priv,
6844 &dev_priv->dpll.shared_dplls[i],
6845 NULL, NULL);
6846 }
6847
6848 static void
6849 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
6850 struct intel_atomic_state *state)
6851 {
6852 verify_encoder_state(dev_priv, state);
6853 verify_connector_state(state, NULL);
6854 verify_disabled_dpll_state(dev_priv);
6855 }
6856
6857 int intel_modeset_all_pipes(struct intel_atomic_state *state)
6858 {
6859 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6860 struct intel_crtc *crtc;
6861
6862 /*
6863 * Add all pipes to the state, and force
6864 * a modeset on all the active ones.
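 * Pipes that are already inactive, or that already have a full modeset
 * pending, are still added to the state below but otherwise left as is.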
6865 */
6866 for_each_intel_crtc(&dev_priv->drm, crtc) {
6867 struct intel_crtc_state *crtc_state;
6868 int ret;
6869
6870 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6871 if (IS_ERR(crtc_state))
6872 return PTR_ERR(crtc_state);
6873
6874 if (!crtc_state->hw.active ||
6875 drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
6876 continue;
6877
6878 crtc_state->uapi.mode_changed = true;
6879
6880 ret = drm_atomic_add_affected_connectors(&state->base,
6881 &crtc->base);
6882 if (ret)
6883 return ret;
6884
6885 ret = intel_atomic_add_affected_planes(state, crtc);
6886 if (ret)
6887 return ret;
6888
6889 crtc_state->update_planes |= crtc_state->active_planes;
6890 }
6891
6892 return 0;
6893 }
6894
6895 static void
6896 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
6897 {
6898 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6899 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6900 struct drm_display_mode adjusted_mode =
6901 crtc_state->hw.adjusted_mode;
6902
6903 if (crtc_state->vrr.enable) {
6904 adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
6905 adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
6906 adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
6907 crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
6908 }
6909
6910 drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
6911
6912 crtc->mode_flags = crtc_state->mode_flags;
6913
6914 /*
6915 * The scanline counter increments at the leading edge of hsync.
6916 *
6917 * On most platforms it starts counting from vtotal-1 on the
6918 * first active line. That means the scanline counter value is
6919 * always one less than what we would expect. I.e. just after
6920 * start of vblank, which also occurs at start of hsync (on the
6921 * last active line), the scanline counter will read vblank_start-1.
6922 *
6923 * On gen2 the scanline counter starts counting from 1 instead
6924 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
6925 * to keep the value positive), instead of adding one.
6926 *
6927 * On HSW+ the behaviour of the scanline counter depends on the output
6928 * type. For DP ports it behaves like most other platforms, but on HDMI
6929 * there's an extra 1 line difference. So we need to add two instead of
6930 * one to the value.
6931 *
6932 * On VLV/CHV DSI the scanline counter would appear to increment
6933 * approx. 1/3 of a scanline before start of vblank. Unfortunately
6934 * that means we can't tell whether we're in vblank or not while
6935 * we're on that particular line. We must still set scanline_offset
6936 * to 1 so that the vblank timestamps come out correct when we query
6937 * the scanline counter from within the vblank interrupt handler.
6938 * However if queried just before the start of vblank we'll get an
6939 * answer that's slightly in the future.
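 *
 * To summarize, the offsets applied below are:
 *   gen2:            scanline_offset = vtotal - 1 (vtotal halved first
 *                    for interlaced modes)
 *   HSW+ DDI HDMI:   scanline_offset = 2
 *   everything else: scanline_offset = 1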
6940 */
6941 if (DISPLAY_VER(dev_priv) == 2) {
6942 int vtotal;
6943
6944 vtotal = adjusted_mode.crtc_vtotal;
6945 if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6946 vtotal /= 2;
6947
6948 crtc->scanline_offset = vtotal - 1;
6949 } else if (HAS_DDI(dev_priv) &&
6950 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
6951 crtc->scanline_offset = 2;
6952 } else {
6953 crtc->scanline_offset = 1;
6954 }
6955 }
6956
6957 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
6958 {
6959 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6960 struct intel_crtc_state *new_crtc_state;
6961 struct intel_crtc *crtc;
6962 int i;
6963
6964 if (!dev_priv->dpll_funcs)
6965 return;
6966
6967 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6968 if (!intel_crtc_needs_modeset(new_crtc_state))
6969 continue;
6970
6971 intel_release_shared_dplls(state, crtc);
6972 }
6973 }
6974
6975 /*
6976 * This implements the workaround described in the "notes" section of the mode
6977 * set sequence documentation. When going from no pipes or single pipe to
6978 * multiple pipes, and planes are enabled after the pipe, we need to wait at
6979 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
6980 */
6981 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
6982 {
6983 struct intel_crtc_state *crtc_state;
6984 struct intel_crtc *crtc;
6985 struct intel_crtc_state *first_crtc_state = NULL;
6986 struct intel_crtc_state *other_crtc_state = NULL;
6987 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
6988 int i;
6989
6990 /* look at all crtcs that are going to be enabled during the modeset */
6991 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6992 if (!crtc_state->hw.active ||
6993 !intel_crtc_needs_modeset(crtc_state))
6994 continue;
6995
6996 if (first_crtc_state) {
6997 other_crtc_state = crtc_state;
6998 break;
6999 } else {
7000 first_crtc_state = crtc_state;
7001 first_pipe = crtc->pipe;
7002 }
7003 }
7004
7005 /* No workaround needed? */
7006 if (!first_crtc_state)
7007 return 0;
7008
7009 /* w/a possibly needed, check how many crtcs are already enabled.
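 * With exactly one crtc already enabled the first newly enabled pipe
 * must wait on it; with none enabled, the second new pipe waits on the
 * first. The pipe to wait on ends up in hsw_workaround_pipe.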
*/ 7010 for_each_intel_crtc(state->base.dev, crtc) { 7011 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 7012 if (IS_ERR(crtc_state)) 7013 return PTR_ERR(crtc_state); 7014 7015 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 7016 7017 if (!crtc_state->hw.active || 7018 intel_crtc_needs_modeset(crtc_state)) 7019 continue; 7020 7021 /* 2 or more enabled crtcs means no need for w/a */ 7022 if (enabled_pipe != INVALID_PIPE) 7023 return 0; 7024 7025 enabled_pipe = crtc->pipe; 7026 } 7027 7028 if (enabled_pipe != INVALID_PIPE) 7029 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 7030 else if (other_crtc_state) 7031 other_crtc_state->hsw_workaround_pipe = first_pipe; 7032 7033 return 0; 7034 } 7035 7036 u8 intel_calc_active_pipes(struct intel_atomic_state *state, 7037 u8 active_pipes) 7038 { 7039 const struct intel_crtc_state *crtc_state; 7040 struct intel_crtc *crtc; 7041 int i; 7042 7043 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 7044 if (crtc_state->hw.active) 7045 active_pipes |= BIT(crtc->pipe); 7046 else 7047 active_pipes &= ~BIT(crtc->pipe); 7048 } 7049 7050 return active_pipes; 7051 } 7052 7053 static int intel_modeset_checks(struct intel_atomic_state *state) 7054 { 7055 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7056 7057 state->modeset = true; 7058 7059 if (IS_HASWELL(dev_priv)) 7060 return hsw_mode_set_planes_workaround(state); 7061 7062 return 0; 7063 } 7064 7065 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 7066 struct intel_crtc_state *new_crtc_state) 7067 { 7068 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 7069 return; 7070 7071 new_crtc_state->uapi.mode_changed = false; 7072 new_crtc_state->update_pipe = true; 7073 } 7074 7075 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state, 7076 struct intel_crtc_state *new_crtc_state) 7077 { 7078 /* 7079 * If we're not doing the full modeset we want to 7080 * keep the current M/N values as they may be 7081 * sufficiently different to the computed values 7082 * to cause problems. 
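 * (the state checker compares these M/N values only fuzzily for the
 * same reason)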
7083 * 7084 * FIXME: should really copy more fuzzy state here 7085 */ 7086 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; 7087 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; 7088 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; 7089 new_crtc_state->has_drrs = old_crtc_state->has_drrs; 7090 } 7091 7092 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 7093 struct intel_crtc *crtc, 7094 u8 plane_ids_mask) 7095 { 7096 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7097 struct intel_plane *plane; 7098 7099 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 7100 struct intel_plane_state *plane_state; 7101 7102 if ((plane_ids_mask & BIT(plane->id)) == 0) 7103 continue; 7104 7105 plane_state = intel_atomic_get_plane_state(state, plane); 7106 if (IS_ERR(plane_state)) 7107 return PTR_ERR(plane_state); 7108 } 7109 7110 return 0; 7111 } 7112 7113 int intel_atomic_add_affected_planes(struct intel_atomic_state *state, 7114 struct intel_crtc *crtc) 7115 { 7116 const struct intel_crtc_state *old_crtc_state = 7117 intel_atomic_get_old_crtc_state(state, crtc); 7118 const struct intel_crtc_state *new_crtc_state = 7119 intel_atomic_get_new_crtc_state(state, crtc); 7120 7121 return intel_crtc_add_planes_to_state(state, crtc, 7122 old_crtc_state->enabled_planes | 7123 new_crtc_state->enabled_planes); 7124 } 7125 7126 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 7127 { 7128 /* See {hsw,vlv,ivb}_plane_ratio() */ 7129 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 7130 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 7131 IS_IVYBRIDGE(dev_priv); 7132 } 7133 7134 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state, 7135 struct intel_crtc *crtc, 7136 struct intel_crtc *other) 7137 { 7138 const struct intel_plane_state *plane_state; 7139 struct intel_plane *plane; 7140 u8 plane_ids = 0; 7141 int i; 7142 7143 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 7144 if (plane->pipe == crtc->pipe) 7145 plane_ids |= BIT(plane->id); 7146 } 7147 7148 return intel_crtc_add_planes_to_state(state, other, plane_ids); 7149 } 7150 7151 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state) 7152 { 7153 struct drm_i915_private *i915 = to_i915(state->base.dev); 7154 const struct intel_crtc_state *crtc_state; 7155 struct intel_crtc *crtc; 7156 int i; 7157 7158 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 7159 struct intel_crtc *other; 7160 7161 for_each_intel_crtc_in_pipe_mask(&i915->drm, other, 7162 crtc_state->bigjoiner_pipes) { 7163 int ret; 7164 7165 if (crtc == other) 7166 continue; 7167 7168 ret = intel_crtc_add_bigjoiner_planes(state, crtc, other); 7169 if (ret) 7170 return ret; 7171 } 7172 } 7173 7174 return 0; 7175 } 7176 7177 static int intel_atomic_check_planes(struct intel_atomic_state *state) 7178 { 7179 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7180 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 7181 struct intel_plane_state *plane_state; 7182 struct intel_plane *plane; 7183 struct intel_crtc *crtc; 7184 int i, ret; 7185 7186 ret = icl_add_linked_planes(state); 7187 if (ret) 7188 return ret; 7189 7190 ret = intel_bigjoiner_add_affected_planes(state); 7191 if (ret) 7192 return ret; 7193 7194 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 7195 ret = intel_plane_atomic_check(state, plane); 7196 if (ret) { 7197 drm_dbg_atomic(&dev_priv->drm, 7198 "[PLANE:%d:%s] atomic driver check 
failed\n", 7199 plane->base.base.id, plane->base.name); 7200 return ret; 7201 } 7202 } 7203 7204 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7205 new_crtc_state, i) { 7206 u8 old_active_planes, new_active_planes; 7207 7208 ret = icl_check_nv12_planes(new_crtc_state); 7209 if (ret) 7210 return ret; 7211 7212 /* 7213 * On some platforms the number of active planes affects 7214 * the planes' minimum cdclk calculation. Add such planes 7215 * to the state before we compute the minimum cdclk. 7216 */ 7217 if (!active_planes_affects_min_cdclk(dev_priv)) 7218 continue; 7219 7220 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 7221 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 7222 7223 if (hweight8(old_active_planes) == hweight8(new_active_planes)) 7224 continue; 7225 7226 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 7227 if (ret) 7228 return ret; 7229 } 7230 7231 return 0; 7232 } 7233 7234 static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 7235 { 7236 struct intel_crtc_state *crtc_state; 7237 struct intel_crtc *crtc; 7238 int i; 7239 7240 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 7241 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 7242 int ret; 7243 7244 ret = intel_crtc_atomic_check(state, crtc); 7245 if (ret) { 7246 drm_dbg_atomic(&i915->drm, 7247 "[CRTC:%d:%s] atomic driver check failed\n", 7248 crtc->base.base.id, crtc->base.name); 7249 return ret; 7250 } 7251 } 7252 7253 return 0; 7254 } 7255 7256 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, 7257 u8 transcoders) 7258 { 7259 const struct intel_crtc_state *new_crtc_state; 7260 struct intel_crtc *crtc; 7261 int i; 7262 7263 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7264 if (new_crtc_state->hw.enable && 7265 transcoders & BIT(new_crtc_state->cpu_transcoder) && 7266 intel_crtc_needs_modeset(new_crtc_state)) 7267 return true; 7268 } 7269 7270 return false; 7271 } 7272 7273 static bool intel_pipes_need_modeset(struct intel_atomic_state *state, 7274 u8 pipes) 7275 { 7276 const struct intel_crtc_state *new_crtc_state; 7277 struct intel_crtc *crtc; 7278 int i; 7279 7280 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7281 if (new_crtc_state->hw.enable && 7282 pipes & BIT(crtc->pipe) && 7283 intel_crtc_needs_modeset(new_crtc_state)) 7284 return true; 7285 } 7286 7287 return false; 7288 } 7289 7290 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, 7291 struct intel_crtc *master_crtc) 7292 { 7293 struct drm_i915_private *i915 = to_i915(state->base.dev); 7294 struct intel_crtc_state *master_crtc_state = 7295 intel_atomic_get_new_crtc_state(state, master_crtc); 7296 struct intel_crtc *slave_crtc; 7297 u8 slave_pipes; 7298 7299 /* 7300 * TODO: encoder.compute_config() may be the best 7301 * place to populate the bitmask for the master crtc. 7302 * For now encoder.compute_config() just flags things 7303 * as needing bigjoiner and we populate the bitmask 7304 * here. 
7305 */ 7306 WARN_ON(master_crtc_state->bigjoiner_pipes); 7307 7308 if (!master_crtc_state->bigjoiner) 7309 return 0; 7310 7311 slave_pipes = BIT(master_crtc->pipe + 1); 7312 7313 if (slave_pipes & ~bigjoiner_pipes(i915)) { 7314 drm_dbg_kms(&i915->drm, 7315 "[CRTC:%d:%s] Cannot act as big joiner master " 7316 "(need 0x%x as slave pipes, only 0x%x possible)\n", 7317 master_crtc->base.base.id, master_crtc->base.name, 7318 slave_pipes, bigjoiner_pipes(i915)); 7319 return -EINVAL; 7320 } 7321 7322 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, slave_pipes) { 7323 struct intel_crtc_state *slave_crtc_state; 7324 int ret; 7325 7326 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc); 7327 if (IS_ERR(slave_crtc_state)) 7328 return PTR_ERR(slave_crtc_state); 7329 7330 /* master is being enabled, was the slave already configured? */ 7331 if (slave_crtc_state->uapi.enable) { 7332 drm_dbg_kms(&i915->drm, 7333 "[CRTC:%d:%s] Slave is enabled as normal CRTC, but " 7334 "[CRTC:%d:%s] is claiming this CRTC for bigjoiner.\n", 7335 slave_crtc->base.base.id, slave_crtc->base.name, 7336 master_crtc->base.base.id, master_crtc->base.name); 7337 return -EINVAL; 7338 } 7339 7340 /* 7341 * The state copy logic assumes the master crtc gets processed 7342 * before the slave crtc during the main compute_config loop. 7343 * This works because the crtcs are created in pipe order, 7344 * and the hardware requires master pipe < slave pipe as well. 7345 * Should that change we need to rethink the logic. 7346 */ 7347 if (WARN_ON(drm_crtc_index(&master_crtc->base) > 7348 drm_crtc_index(&slave_crtc->base))) 7349 return -EINVAL; 7350 7351 drm_dbg_kms(&i915->drm, 7352 "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n", 7353 slave_crtc->base.base.id, slave_crtc->base.name, 7354 master_crtc->base.base.id, master_crtc->base.name); 7355 7356 master_crtc_state->bigjoiner_pipes = 7357 BIT(master_crtc->pipe) | BIT(slave_crtc->pipe); 7358 slave_crtc_state->bigjoiner_pipes = 7359 BIT(master_crtc->pipe) | BIT(slave_crtc->pipe); 7360 7361 ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc); 7362 if (ret) 7363 return ret; 7364 } 7365 7366 return 0; 7367 } 7368 7369 static void kill_bigjoiner_slave(struct intel_atomic_state *state, 7370 struct intel_crtc *master_crtc) 7371 { 7372 struct drm_i915_private *i915 = to_i915(state->base.dev); 7373 struct intel_crtc_state *master_crtc_state = 7374 intel_atomic_get_new_crtc_state(state, master_crtc); 7375 struct intel_crtc *slave_crtc; 7376 7377 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, 7378 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) { 7379 struct intel_crtc_state *slave_crtc_state = 7380 intel_atomic_get_new_crtc_state(state, slave_crtc); 7381 7382 slave_crtc_state->bigjoiner = false; 7383 slave_crtc_state->bigjoiner_pipes = 0; 7384 7385 intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc); 7386 } 7387 7388 master_crtc_state->bigjoiner = false; 7389 master_crtc_state->bigjoiner_pipes = 0; 7390 } 7391 7392 /** 7393 * DOC: asynchronous flip implementation 7394 * 7395 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC 7396 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL. 7397 * Correspondingly, support is currently added for primary plane only. 7398 * 7399 * Async flip can only change the plane surface address, so anything else 7400 * changing is rejected by the intel_async_flip_check_hw() function.
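*
* For illustration, a userspace async flip request through the legacy page
* flip IOCTL looks roughly like the following (a hypothetical libdrm sketch,
* not part of the driver):
*
* ::
*
*	drmModePageFlip(fd, crtc_id, fb_id,
*			DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
*			user_data);
*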
Once this check is cleared, flip done interrupt is enabled using 7402 * the intel_crtc_enable_flip_done() function. 7403 * 7404 * As soon as the surface address register is written, flip done interrupt is 7405 * generated and the requested events are sent to the userspace in the interrupt 7406 * handler itself. The timestamp and sequence sent during the flip done event 7407 * correspond to the last vblank and have no relation to the actual time when 7408 * the flip done event was sent. 7409 */ 7410 static int intel_async_flip_check_uapi(struct intel_atomic_state *state, 7411 struct intel_crtc *crtc) 7412 { 7413 struct drm_i915_private *i915 = to_i915(state->base.dev); 7414 const struct intel_crtc_state *new_crtc_state = 7415 intel_atomic_get_new_crtc_state(state, crtc); 7416 const struct intel_plane_state *old_plane_state; 7417 struct intel_plane_state *new_plane_state; 7418 struct intel_plane *plane; 7419 int i; 7420 7421 if (!new_crtc_state->uapi.async_flip) 7422 return 0; 7423 7424 if (!new_crtc_state->uapi.active) { 7425 drm_dbg_kms(&i915->drm, 7426 "[CRTC:%d:%s] not active\n", 7427 crtc->base.base.id, crtc->base.name); 7428 return -EINVAL; 7429 } 7430 7431 if (intel_crtc_needs_modeset(new_crtc_state)) { 7432 drm_dbg_kms(&i915->drm, 7433 "[CRTC:%d:%s] modeset required\n", 7434 crtc->base.base.id, crtc->base.name); 7435 return -EINVAL; 7436 } 7437 7438 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 7439 new_plane_state, i) { 7440 if (plane->pipe != crtc->pipe) 7441 continue; 7442 7443 /* 7444 * TODO: Async flip is only supported through the page flip IOCTL 7445 * as of now, so support is currently added for the primary plane only. 7446 * Support for other planes on platforms which support 7447 * this (vlv/chv and icl+) should be added when async flip is 7448 * enabled in the atomic IOCTL path. 7449 */ 7450 if (!plane->async_flip) { 7451 drm_dbg_kms(&i915->drm, 7452 "[PLANE:%d:%s] async flip not supported\n", 7453 plane->base.base.id, plane->base.name); 7454 return -EINVAL; 7455 } 7456 7457 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) { 7458 drm_dbg_kms(&i915->drm, 7459 "[PLANE:%d:%s] no old or new framebuffer\n", 7460 plane->base.base.id, plane->base.name); 7461 return -EINVAL; 7462 } 7463 } 7464 7465 return 0; 7466 } 7467 7468 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc) 7469 { 7470 struct drm_i915_private *i915 = to_i915(state->base.dev); 7471 const struct intel_crtc_state *old_crtc_state, *new_crtc_state; 7472 const struct intel_plane_state *new_plane_state, *old_plane_state; 7473 struct intel_plane *plane; 7474 int i; 7475 7476 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 7477 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 7478 7479 if (!new_crtc_state->uapi.async_flip) 7480 return 0; 7481 7482 if (intel_crtc_needs_modeset(new_crtc_state)) { 7483 drm_dbg_kms(&i915->drm, "Modeset Required.
Async flip not supported\n"); 7484 return -EINVAL; 7485 } 7486 7487 if (!new_crtc_state->hw.active) { 7488 drm_dbg_kms(&i915->drm, "CRTC inactive\n"); 7489 return -EINVAL; 7490 } 7491 if (old_crtc_state->active_planes != new_crtc_state->active_planes) { 7492 drm_dbg_kms(&i915->drm, 7493 "Active planes cannot be changed during async flip\n"); 7494 return -EINVAL; 7495 } 7496 7497 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 7498 new_plane_state, i) { 7499 if (plane->pipe != crtc->pipe) 7500 continue; 7501 7502 /* 7503 * Only async flip capable planes should be in the state 7504 * if we're really about to ask the hardware to perform 7505 * an async flip. We should never get this far otherwise. 7506 */ 7507 if (drm_WARN_ON(&i915->drm, 7508 new_crtc_state->do_async_flip && !plane->async_flip)) 7509 return -EINVAL; 7510 7511 /* 7512 * Only check async flip capable planes; other planes 7513 * may be involved in the initial commit due to 7514 * the wm0/ddb optimization. 7515 * 7516 * TODO: maybe we should track which planes actually 7517 * were requested to do the async flip... 7518 */ 7519 if (!plane->async_flip) 7520 continue; 7521 7522 /* 7523 * FIXME: This check is kept generic for all platforms. 7524 * Need to verify this for all gen9 platforms to enable 7525 * this selectively if required. 7526 */ 7527 switch (new_plane_state->hw.fb->modifier) { 7528 case I915_FORMAT_MOD_X_TILED: 7529 case I915_FORMAT_MOD_Y_TILED: 7530 case I915_FORMAT_MOD_Yf_TILED: 7531 break; 7532 default: 7533 drm_dbg_kms(&i915->drm, 7534 "Linear memory/CCS does not support async flips\n"); 7535 return -EINVAL; 7536 } 7537 7538 if (new_plane_state->hw.fb->format->num_planes > 1) { 7539 drm_dbg_kms(&i915->drm, 7540 "Planar formats not supported with async flips\n"); 7541 return -EINVAL; 7542 } 7543 7544 if (old_plane_state->view.color_plane[0].mapping_stride != 7545 new_plane_state->view.color_plane[0].mapping_stride) { 7546 drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n"); 7547 return -EINVAL; 7548 } 7549 7550 if (old_plane_state->hw.fb->modifier != 7551 new_plane_state->hw.fb->modifier) { 7552 drm_dbg_kms(&i915->drm, 7553 "Framebuffer modifiers cannot be changed in async flip\n"); 7554 return -EINVAL; 7555 } 7556 7557 if (old_plane_state->hw.fb->format != 7558 new_plane_state->hw.fb->format) { 7559 drm_dbg_kms(&i915->drm, 7560 "Framebuffer format cannot be changed in async flip\n"); 7561 return -EINVAL; 7562 } 7563 7564 if (old_plane_state->hw.rotation != 7565 new_plane_state->hw.rotation) { 7566 drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n"); 7567 return -EINVAL; 7568 } 7569 7570 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) || 7571 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) { 7572 drm_dbg_kms(&i915->drm, 7573 "Plane size/co-ordinates cannot be changed in async flip\n"); 7574 return -EINVAL; 7575 } 7576 7577 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) { 7578 drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n"); 7579 return -EINVAL; 7580 } 7581 7582 if (old_plane_state->hw.pixel_blend_mode != 7583 new_plane_state->hw.pixel_blend_mode) { 7584 drm_dbg_kms(&i915->drm, 7585 "Pixel blend mode cannot be changed in async flip\n"); 7586 return -EINVAL; 7587 } 7588 7589 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) { 7590 drm_dbg_kms(&i915->drm, 7591 "Color encoding cannot be changed in async flip\n"); 7592 return -EINVAL; 7593 } 7594 7595 if
(old_plane_state->hw.color_range != new_plane_state->hw.color_range) { 7596 drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n"); 7597 return -EINVAL; 7598 } 7599 7600 /* plane decryption is allowed to change only in synchronous flips */ 7601 if (old_plane_state->decrypt != new_plane_state->decrypt) 7602 return -EINVAL; 7603 } 7604 7605 return 0; 7606 } 7607 7608 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) 7609 { 7610 struct drm_i915_private *i915 = to_i915(state->base.dev); 7611 struct intel_crtc_state *crtc_state; 7612 struct intel_crtc *crtc; 7613 u8 affected_pipes = 0; 7614 u8 modeset_pipes = 0; 7615 int i; 7616 7617 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 7618 affected_pipes |= crtc_state->bigjoiner_pipes; 7619 if (intel_crtc_needs_modeset(crtc_state)) 7620 modeset_pipes |= crtc_state->bigjoiner_pipes; 7621 } 7622 7623 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) { 7624 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 7625 if (IS_ERR(crtc_state)) 7626 return PTR_ERR(crtc_state); 7627 } 7628 7629 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) { 7630 int ret; 7631 7632 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 7633 7634 crtc_state->uapi.mode_changed = true; 7635 7636 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 7637 if (ret) 7638 return ret; 7639 7640 ret = intel_atomic_add_affected_planes(state, crtc); 7641 if (ret) 7642 return ret; 7643 } 7644 7645 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 7646 /* Kill old bigjoiner link, we may re-establish afterwards */ 7647 if (intel_crtc_needs_modeset(crtc_state) && 7648 intel_crtc_is_bigjoiner_master(crtc_state)) 7649 kill_bigjoiner_slave(state, crtc); 7650 } 7651 7652 return 0; 7653 } 7654 7655 /** 7656 * intel_atomic_check - validate state object 7657 * @dev: drm device 7658 * @_state: state to validate 7659 */ 7660 static int intel_atomic_check(struct drm_device *dev, 7661 struct drm_atomic_state *_state) 7662 { 7663 struct drm_i915_private *dev_priv = to_i915(dev); 7664 struct intel_atomic_state *state = to_intel_atomic_state(_state); 7665 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 7666 struct intel_crtc *crtc; 7667 int ret, i; 7668 bool any_ms = false; 7669 7670 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7671 new_crtc_state, i) { 7672 if (new_crtc_state->inherited != old_crtc_state->inherited) 7673 new_crtc_state->uapi.mode_changed = true; 7674 7675 if (new_crtc_state->uapi.scaling_filter != 7676 old_crtc_state->uapi.scaling_filter) 7677 new_crtc_state->uapi.mode_changed = true; 7678 } 7679 7680 intel_vrr_check_modeset(state); 7681 7682 ret = drm_atomic_helper_check_modeset(dev, &state->base); 7683 if (ret) 7684 goto fail; 7685 7686 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7687 ret = intel_async_flip_check_uapi(state, crtc); 7688 if (ret) 7689 return ret; 7690 } 7691 7692 ret = intel_bigjoiner_add_affected_crtcs(state); 7693 if (ret) 7694 goto fail; 7695 7696 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7697 new_crtc_state, i) { 7698 if (!intel_crtc_needs_modeset(new_crtc_state)) { 7699 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) 7700 copy_bigjoiner_crtc_state_nomodeset(state, crtc); 7701 else 7702 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 7703 continue; 7704 } 7705 7706 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) { 7707
drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable); 7708 continue; 7709 } 7710 7711 ret = intel_crtc_prepare_cleared_state(state, crtc); 7712 if (ret) 7713 goto fail; 7714 7715 if (!new_crtc_state->hw.enable) 7716 continue; 7717 7718 ret = intel_modeset_pipe_config(state, new_crtc_state); 7719 if (ret) 7720 goto fail; 7721 7722 ret = intel_atomic_check_bigjoiner(state, crtc); 7723 if (ret) 7724 goto fail; 7725 } 7726 7727 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7728 new_crtc_state, i) { 7729 if (!intel_crtc_needs_modeset(new_crtc_state)) 7730 continue; 7731 7732 ret = intel_modeset_pipe_config_late(new_crtc_state); 7733 if (ret) 7734 goto fail; 7735 7736 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 7737 } 7738 7739 /* 7740 * Check if fastset is allowed by external dependencies like other 7741 * pipes and transcoders. 7742 * 7743 * Right now it only forces a full modeset when the MST master 7744 * transcoder did not change but the pipe of the master transcoder 7745 * needs a full modeset, in which case all slaves also need to do a 7746 * full modeset. Similarly, in the case of port synced crtcs, if one 7747 * of the synced crtcs needs a full modeset, all the other synced 7748 * crtcs are forced to do a full modeset as well. 7749 */ 7750 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7751 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) 7752 continue; 7753 7754 if (intel_dp_mst_is_slave_trans(new_crtc_state)) { 7755 enum transcoder master = new_crtc_state->mst_master_transcoder; 7756 7757 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) { 7758 new_crtc_state->uapi.mode_changed = true; 7759 new_crtc_state->update_pipe = false; 7760 } 7761 } 7762 7763 if (is_trans_port_sync_mode(new_crtc_state)) { 7764 u8 trans = new_crtc_state->sync_mode_slaves_mask; 7765 7766 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) 7767 trans |= BIT(new_crtc_state->master_transcoder); 7768 7769 if (intel_cpu_transcoders_need_modeset(state, trans)) { 7770 new_crtc_state->uapi.mode_changed = true; 7771 new_crtc_state->update_pipe = false; 7772 } 7773 } 7774 7775 if (new_crtc_state->bigjoiner) { 7776 if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) { 7777 new_crtc_state->uapi.mode_changed = true; 7778 new_crtc_state->update_pipe = false; 7779 } 7780 } 7781 } 7782 7783 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7784 new_crtc_state, i) { 7785 if (intel_crtc_needs_modeset(new_crtc_state)) { 7786 any_ms = true; 7787 continue; 7788 } 7789 7790 if (!new_crtc_state->update_pipe) 7791 continue; 7792 7793 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state); 7794 } 7795 7796 if (any_ms && !check_digital_port_conflicts(state)) { 7797 drm_dbg_kms(&dev_priv->drm, 7798 "rejecting conflicting digital port configuration\n"); 7799 ret = -EINVAL; 7800 goto fail; 7801 } 7802 7803 ret = drm_dp_mst_atomic_check(&state->base); 7804 if (ret) 7805 goto fail; 7806 7807 ret = intel_atomic_check_planes(state); 7808 if (ret) 7809 goto fail; 7810 7811 ret = intel_compute_global_watermarks(state); 7812 if (ret) 7813 goto fail; 7814 7815 ret = intel_bw_atomic_check(state); 7816 if (ret) 7817 goto fail; 7818 7819 ret = intel_cdclk_atomic_check(state, &any_ms); 7820 if (ret) 7821 goto fail; 7822 7823 if (intel_any_crtc_needs_modeset(state)) 7824 any_ms = true; 7825 7826 if (any_ms) { 7827 ret = intel_modeset_checks(state); 7828 if (ret) 7829 goto fail; 7830 7831 ret = intel_modeset_calc_cdclk(state); 7832 if (ret) 7833 return
ret; 7834 7835 intel_modeset_clear_plls(state); 7836 } 7837 7838 ret = intel_atomic_check_crtcs(state); 7839 if (ret) 7840 goto fail; 7841 7842 ret = intel_fbc_atomic_check(state); 7843 if (ret) 7844 goto fail; 7845 7846 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7847 new_crtc_state, i) { 7848 ret = intel_async_flip_check_hw(state, crtc); 7849 if (ret) 7850 goto fail; 7851 7852 if (!intel_crtc_needs_modeset(new_crtc_state) && 7853 !new_crtc_state->update_pipe) 7854 continue; 7855 7856 intel_dump_pipe_config(new_crtc_state, state, 7857 intel_crtc_needs_modeset(new_crtc_state) ? 7858 "[modeset]" : "[fastset]"); 7859 } 7860 7861 return 0; 7862 7863 fail: 7864 if (ret == -EDEADLK) 7865 return ret; 7866 7867 /* 7868 * FIXME would probably be nice to know which crtc specifically 7869 * caused the failure, in cases where we can pinpoint it. 7870 */ 7871 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7872 new_crtc_state, i) 7873 intel_dump_pipe_config(new_crtc_state, state, "[failed]"); 7874 7875 return ret; 7876 } 7877 7878 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 7879 { 7880 struct intel_crtc_state *crtc_state; 7881 struct intel_crtc *crtc; 7882 int i, ret; 7883 7884 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); 7885 if (ret < 0) 7886 return ret; 7887 7888 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 7889 bool mode_changed = intel_crtc_needs_modeset(crtc_state); 7890 7891 if (mode_changed || crtc_state->update_pipe || 7892 crtc_state->uapi.color_mgmt_changed) { 7893 intel_dsb_prepare(crtc_state); 7894 } 7895 } 7896 7897 return 0; 7898 } 7899 7900 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 7901 struct intel_crtc_state *crtc_state) 7902 { 7903 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7904 7905 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes) 7906 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 7907 7908 if (crtc_state->has_pch_encoder) { 7909 enum pipe pch_transcoder = 7910 intel_crtc_pch_transcoder(crtc); 7911 7912 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 7913 } 7914 } 7915 7916 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 7917 const struct intel_crtc_state *new_crtc_state) 7918 { 7919 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 7920 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7921 7922 /* 7923 * Update pipe size and adjust fitter if needed: the reason for this is 7924 * that in compute_mode_changes we check the native mode (not the pfit 7925 * mode) to see if we can flip rather than do a full mode set. In the 7926 * fastboot case, we'll flip, but if we don't update the pipesrc and 7927 * pfit state, we'll end up with a big fb scanned out into the wrong 7928 * sized surface. 7929 */ 7930 intel_set_pipe_src_size(new_crtc_state); 7931 7932 /* on skylake this is done by detaching scalers */ 7933 if (DISPLAY_VER(dev_priv) >= 9) { 7934 if (new_crtc_state->pch_pfit.enabled) 7935 skl_pfit_enable(new_crtc_state); 7936 } else if (HAS_PCH_SPLIT(dev_priv)) { 7937 if (new_crtc_state->pch_pfit.enabled) 7938 ilk_pfit_enable(new_crtc_state); 7939 else if (old_crtc_state->pch_pfit.enabled) 7940 ilk_pfit_disable(old_crtc_state); 7941 } 7942 7943 /* 7944 * The register is supposedly single buffered so perhaps 7945 * not 100% correct to do this here. 
But SKL+ calculates 7946 * this based on the adjusted pixel rate so pfit changes do 7947 * affect it and so it must be updated for fastsets. 7948 * HSW/BDW only really need this here for fastboot, after 7949 * that the value should not change without a full modeset. 7950 */ 7951 if (DISPLAY_VER(dev_priv) >= 9 || 7952 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 7953 hsw_set_linetime_wm(new_crtc_state); 7954 } 7955 7956 static void commit_pipe_pre_planes(struct intel_atomic_state *state, 7957 struct intel_crtc *crtc) 7958 { 7959 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7960 const struct intel_crtc_state *old_crtc_state = 7961 intel_atomic_get_old_crtc_state(state, crtc); 7962 const struct intel_crtc_state *new_crtc_state = 7963 intel_atomic_get_new_crtc_state(state, crtc); 7964 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 7965 7966 /* 7967 * During modesets pipe configuration was programmed as the 7968 * CRTC was enabled. 7969 */ 7970 if (!modeset) { 7971 if (new_crtc_state->uapi.color_mgmt_changed || 7972 new_crtc_state->update_pipe) 7973 intel_color_commit(new_crtc_state); 7974 7975 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 7976 bdw_set_pipemisc(new_crtc_state); 7977 7978 if (new_crtc_state->update_pipe) 7979 intel_pipe_fastset(old_crtc_state, new_crtc_state); 7980 } 7981 7982 intel_psr2_program_trans_man_trk_ctl(new_crtc_state); 7983 7984 intel_atomic_update_watermarks(state, crtc); 7985 } 7986 7987 static void commit_pipe_post_planes(struct intel_atomic_state *state, 7988 struct intel_crtc *crtc) 7989 { 7990 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7991 const struct intel_crtc_state *new_crtc_state = 7992 intel_atomic_get_new_crtc_state(state, crtc); 7993 7994 /* 7995 * Disable the scaler(s) after the plane(s) so that we don't 7996 * get a catastrophic underrun even if the two operations 7997 * end up happening in two different frames. 7998 */ 7999 if (DISPLAY_VER(dev_priv) >= 9 && 8000 !intel_crtc_needs_modeset(new_crtc_state)) 8001 skl_detach_scalers(new_crtc_state); 8002 } 8003 8004 static void intel_enable_crtc(struct intel_atomic_state *state, 8005 struct intel_crtc *crtc) 8006 { 8007 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 8008 const struct intel_crtc_state *new_crtc_state = 8009 intel_atomic_get_new_crtc_state(state, crtc); 8010 8011 if (!intel_crtc_needs_modeset(new_crtc_state)) 8012 return; 8013 8014 intel_crtc_update_active_timings(new_crtc_state); 8015 8016 dev_priv->display->crtc_enable(state, crtc); 8017 8018 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) 8019 return; 8020 8021 /* vblanks work again, re-enable pipe CRC.
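* (it was disabled in intel_old_crtc_state_disables() before the CRTC
* was shut down)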
*/ 8022 intel_crtc_enable_pipe_crc(crtc); 8023 } 8024 8025 static void intel_update_crtc(struct intel_atomic_state *state, 8026 struct intel_crtc *crtc) 8027 { 8028 struct drm_i915_private *i915 = to_i915(state->base.dev); 8029 const struct intel_crtc_state *old_crtc_state = 8030 intel_atomic_get_old_crtc_state(state, crtc); 8031 struct intel_crtc_state *new_crtc_state = 8032 intel_atomic_get_new_crtc_state(state, crtc); 8033 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 8034 8035 if (!modeset) { 8036 if (new_crtc_state->preload_luts && 8037 (new_crtc_state->uapi.color_mgmt_changed || 8038 new_crtc_state->update_pipe)) 8039 intel_color_load_luts(new_crtc_state); 8040 8041 intel_pre_plane_update(state, crtc); 8042 8043 if (new_crtc_state->update_pipe) 8044 intel_encoders_update_pipe(state, crtc); 8045 8046 if (DISPLAY_VER(i915) >= 11 && 8047 new_crtc_state->update_pipe) 8048 icl_set_pipe_chicken(new_crtc_state); 8049 } 8050 8051 intel_fbc_update(state, crtc); 8052 8053 intel_crtc_planes_update_noarm(state, crtc); 8054 8055 /* Perform vblank evasion around commit operation */ 8056 intel_pipe_update_start(new_crtc_state); 8057 8058 commit_pipe_pre_planes(state, crtc); 8059 8060 intel_crtc_planes_update_arm(state, crtc); 8061 8062 commit_pipe_post_planes(state, crtc); 8063 8064 intel_pipe_update_end(new_crtc_state); 8065 8066 /* 8067 * We usually enable FIFO underrun interrupts as part of the 8068 * CRTC enable sequence during modesets. But when we inherit a 8069 * valid pipe configuration from the BIOS we need to take care 8070 * of enabling them on the CRTC's first fastset. 8071 */ 8072 if (new_crtc_state->update_pipe && !modeset && 8073 old_crtc_state->inherited) 8074 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 8075 } 8076 8077 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 8078 struct intel_crtc_state *old_crtc_state, 8079 struct intel_crtc_state *new_crtc_state, 8080 struct intel_crtc *crtc) 8081 { 8082 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 8083 8084 /* 8085 * We need to disable pipe CRC before disabling the pipe, 8086 * or we race against vblank off. 
8087 */ 8088 intel_crtc_disable_pipe_crc(crtc); 8089 8090 dev_priv->display->crtc_disable(state, crtc); 8091 crtc->active = false; 8092 intel_fbc_disable(crtc); 8093 intel_disable_shared_dpll(old_crtc_state); 8094 8095 /* FIXME unify this for all platforms */ 8096 if (!new_crtc_state->hw.active && 8097 !HAS_GMCH(dev_priv)) 8098 intel_initial_watermarks(state, crtc); 8099 } 8100 8101 static void intel_commit_modeset_disables(struct intel_atomic_state *state) 8102 { 8103 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 8104 struct intel_crtc *crtc; 8105 u32 handled = 0; 8106 int i; 8107 8108 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 8109 new_crtc_state, i) { 8110 if (!intel_crtc_needs_modeset(new_crtc_state)) 8111 continue; 8112 8113 if (!old_crtc_state->hw.active) 8114 continue; 8115 8116 intel_pre_plane_update(state, crtc); 8117 intel_crtc_disable_planes(state, crtc); 8118 } 8119 8120 /* Only disable port sync and MST slaves */ 8121 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 8122 new_crtc_state, i) { 8123 if (!intel_crtc_needs_modeset(new_crtc_state)) 8124 continue; 8125 8126 if (!old_crtc_state->hw.active) 8127 continue; 8128 8129 /* In case of Transcoder Port Sync, master and slave CRTCs can be 8130 * assigned in any order, and we need to make sure that 8131 * slave CRTCs are disabled first and then the master CRTC, since 8132 * slave vblanks are masked till the master's vblank is enabled. 8133 */ 8134 if (!is_trans_port_sync_slave(old_crtc_state) && 8135 !intel_dp_mst_is_slave_trans(old_crtc_state) && 8136 !intel_crtc_is_bigjoiner_slave(old_crtc_state)) 8137 continue; 8138 8139 intel_old_crtc_state_disables(state, old_crtc_state, 8140 new_crtc_state, crtc); 8141 handled |= BIT(crtc->pipe); 8142 } 8143 8144 /* Disable everything else left on */ 8145 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 8146 new_crtc_state, i) { 8147 if (!intel_crtc_needs_modeset(new_crtc_state) || 8148 (handled & BIT(crtc->pipe))) 8149 continue; 8150 8151 if (!old_crtc_state->hw.active) 8152 continue; 8153 8154 intel_old_crtc_state_disables(state, old_crtc_state, 8155 new_crtc_state, crtc); 8156 } 8157 } 8158 8159 static void intel_commit_modeset_enables(struct intel_atomic_state *state) 8160 { 8161 struct intel_crtc_state *new_crtc_state; 8162 struct intel_crtc *crtc; 8163 int i; 8164 8165 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 8166 if (!new_crtc_state->hw.active) 8167 continue; 8168 8169 intel_enable_crtc(state, crtc); 8170 intel_update_crtc(state, crtc); 8171 } 8172 } 8173 8174 static void skl_commit_modeset_enables(struct intel_atomic_state *state) 8175 { 8176 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 8177 struct intel_crtc *crtc; 8178 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 8179 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 8180 u8 update_pipes = 0, modeset_pipes = 0; 8181 int i; 8182 8183 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 8184 enum pipe pipe = crtc->pipe; 8185 8186 if (!new_crtc_state->hw.active) 8187 continue; 8188 8189 /* ignore allocations for crtc's that have been turned off.
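* (their entries[] slot simply stays zeroed, so they cannot overlap
* with anything)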
*/ 8190 if (!intel_crtc_needs_modeset(new_crtc_state)) { 8191 entries[pipe] = old_crtc_state->wm.skl.ddb; 8192 update_pipes |= BIT(pipe); 8193 } else { 8194 modeset_pipes |= BIT(pipe); 8195 } 8196 } 8197 8198 /* 8199 * Whenever the number of active pipes changes, we need to make sure we 8200 * update the pipes in the right order so that their ddb allocations 8201 * never overlap with each other between CRTC updates. Otherwise we'll 8202 * cause pipe underruns and other bad stuff. 8203 * 8204 * So first let's enable all pipes that do not need a full modeset as 8205 * those don't have any external dependency. 8206 */ 8207 while (update_pipes) { 8208 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 8209 new_crtc_state, i) { 8210 enum pipe pipe = crtc->pipe; 8211 8212 if ((update_pipes & BIT(pipe)) == 0) 8213 continue; 8214 8215 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 8216 entries, I915_MAX_PIPES, pipe)) 8217 continue; 8218 8219 entries[pipe] = new_crtc_state->wm.skl.ddb; 8220 update_pipes &= ~BIT(pipe); 8221 8222 intel_update_crtc(state, crtc); 8223 8224 /* 8225 * If this is an already active pipe, its DDB changed, 8226 * and this isn't the last pipe that needs updating, 8227 * then we need to wait for a vblank to pass for the 8228 * new ddb allocation to take effect. 8229 */ 8230 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 8231 &old_crtc_state->wm.skl.ddb) && 8232 (update_pipes | modeset_pipes)) 8233 intel_crtc_wait_for_next_vblank(crtc); 8234 } 8235 } 8236 8237 update_pipes = modeset_pipes; 8238 8239 /* 8240 * Enable all pipes that need a modeset and do not depend on other 8241 * pipes 8242 */ 8243 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 8244 enum pipe pipe = crtc->pipe; 8245 8246 if ((modeset_pipes & BIT(pipe)) == 0) 8247 continue; 8248 8249 if (intel_dp_mst_is_slave_trans(new_crtc_state) || 8250 is_trans_port_sync_master(new_crtc_state) || 8251 intel_crtc_is_bigjoiner_master(new_crtc_state)) 8252 continue; 8253 8254 modeset_pipes &= ~BIT(pipe); 8255 8256 intel_enable_crtc(state, crtc); 8257 } 8258 8259 /* 8260 * Then we enable all remaining pipes that depend on other 8261 * pipes: MST slaves and port sync masters, big joiner masters 8262 */ 8263 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 8264 enum pipe pipe = crtc->pipe; 8265 8266 if ((modeset_pipes & BIT(pipe)) == 0) 8267 continue; 8268 8269 modeset_pipes &= ~BIT(pipe); 8270 8271 intel_enable_crtc(state, crtc); 8272 } 8273 8274 /* 8275 * Finally we do the plane updates/etc. for all pipes that got enabled.
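*
* As a concrete example of the ordering implemented above: if pipe A is
* shrinking its DDB allocation while pipe B is growing into the space A
* vacates, then A must be updated first (waiting a vblank for its new,
* smaller allocation to take effect) before B's new allocation can be
* programmed, as otherwise the two allocations would momentarily overlap.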
8276 */ 8277 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 8278 enum pipe pipe = crtc->pipe; 8279 8280 if ((update_pipes & BIT(pipe)) == 0) 8281 continue; 8282 8283 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 8284 entries, I915_MAX_PIPES, pipe)); 8285 8286 entries[pipe] = new_crtc_state->wm.skl.ddb; 8287 update_pipes &= ~BIT(pipe); 8288 8289 intel_update_crtc(state, crtc); 8290 } 8291 8292 drm_WARN_ON(&dev_priv->drm, modeset_pipes); 8293 drm_WARN_ON(&dev_priv->drm, update_pipes); 8294 } 8295 8296 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 8297 { 8298 struct intel_atomic_state *state, *next; 8299 struct llist_node *freed; 8300 8301 freed = llist_del_all(&dev_priv->atomic_helper.free_list); 8302 llist_for_each_entry_safe(state, next, freed, freed) 8303 drm_atomic_state_put(&state->base); 8304 } 8305 8306 static void intel_atomic_helper_free_state_worker(struct work_struct *work) 8307 { 8308 struct drm_i915_private *dev_priv = 8309 container_of(work, typeof(*dev_priv), atomic_helper.free_work); 8310 8311 intel_atomic_helper_free_state(dev_priv); 8312 } 8313 8314 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 8315 { 8316 struct wait_queue_entry wait_fence, wait_reset; 8317 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); 8318 8319 init_wait_entry(&wait_fence, 0); 8320 init_wait_entry(&wait_reset, 0); 8321 for (;;) { 8322 prepare_to_wait(&intel_state->commit_ready.wait, 8323 &wait_fence, TASK_UNINTERRUPTIBLE); 8324 prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, 8325 I915_RESET_MODESET), 8326 &wait_reset, TASK_UNINTERRUPTIBLE); 8327 8328 8329 if (i915_sw_fence_done(&intel_state->commit_ready) || 8330 test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags)) 8331 break; 8332 8333 schedule(); 8334 } 8335 finish_wait(&intel_state->commit_ready.wait, &wait_fence); 8336 finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, 8337 I915_RESET_MODESET), 8338 &wait_reset); 8339 } 8340 8341 static void intel_cleanup_dsbs(struct intel_atomic_state *state) 8342 { 8343 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 8344 struct intel_crtc *crtc; 8345 int i; 8346 8347 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 8348 new_crtc_state, i) 8349 intel_dsb_cleanup(old_crtc_state); 8350 } 8351 8352 static void intel_atomic_cleanup_work(struct work_struct *work) 8353 { 8354 struct intel_atomic_state *state = 8355 container_of(work, struct intel_atomic_state, base.commit_work); 8356 struct drm_i915_private *i915 = to_i915(state->base.dev); 8357 8358 intel_cleanup_dsbs(state); 8359 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); 8360 drm_atomic_helper_commit_cleanup_done(&state->base); 8361 drm_atomic_state_put(&state->base); 8362 8363 intel_atomic_helper_free_state(i915); 8364 } 8365 8366 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state) 8367 { 8368 struct drm_i915_private *i915 = to_i915(state->base.dev); 8369 struct intel_plane *plane; 8370 struct intel_plane_state *plane_state; 8371 int i; 8372 8373 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 8374 struct drm_framebuffer *fb = plane_state->hw.fb; 8375 int cc_plane; 8376 int ret; 8377 8378 if (!fb) 8379 continue; 8380 8381 cc_plane = intel_fb_rc_ccs_cc_plane(fb); 8382 if (cc_plane < 0) 8383 continue; 8384 8385 /* 8386 * The layout of the fast clear color value expected by HW 8387 * (the DRM ABI 
requires this value to be located in the fb at offset 0 of plane #2): 8388 * - 4 x 4 bytes per-channel value 8389 * (in surface type specific float/int format provided by the fb user) 8390 * - 8 bytes native color value used by the display 8391 * (converted/written by GPU during a fast clear operation using the 8392 * above per-channel values) 8393 * 8394 * The commit's FB prepare hook already ensured that the FB obj is pinned and the 8395 * caller made sure that the object is synced wrt. the related color clear value 8396 * GPU write on it. 8397 */ 8398 ret = i915_gem_object_read_from_page(intel_fb_obj(fb), 8399 fb->offsets[cc_plane] + 16, 8400 &plane_state->ccval, 8401 sizeof(plane_state->ccval)); 8402 /* The above could only fail if the FB obj has an unexpected backing store type. */ 8403 drm_WARN_ON(&i915->drm, ret); 8404 } 8405 } 8406 8407 static void intel_atomic_commit_tail(struct intel_atomic_state *state) 8408 { 8409 struct drm_device *dev = state->base.dev; 8410 struct drm_i915_private *dev_priv = to_i915(dev); 8411 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 8412 struct intel_crtc *crtc; 8413 u64 put_domains[I915_MAX_PIPES] = {}; 8414 intel_wakeref_t wakeref = 0; 8415 int i; 8416 8417 intel_atomic_commit_fence_wait(state); 8418 8419 drm_atomic_helper_wait_for_dependencies(&state->base); 8420 8421 if (state->modeset) 8422 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 8423 8424 intel_atomic_prepare_plane_clear_colors(state); 8425 8426 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 8427 new_crtc_state, i) { 8428 if (intel_crtc_needs_modeset(new_crtc_state) || 8429 new_crtc_state->update_pipe) { 8430 8431 put_domains[crtc->pipe] = 8432 modeset_get_crtc_power_domains(new_crtc_state); 8433 } 8434 } 8435 8436 intel_commit_modeset_disables(state); 8437 8438 /* FIXME: Eventually get rid of our crtc->config pointer */ 8439 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 8440 crtc->config = new_crtc_state; 8441 8442 if (state->modeset) { 8443 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); 8444 8445 intel_set_cdclk_pre_plane_update(state); 8446 8447 intel_modeset_verify_disabled(dev_priv, state); 8448 } 8449 8450 intel_sagv_pre_plane_update(state); 8451 8452 /* Complete the events for pipes that have now been disabled */ 8453 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 8454 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 8455 8456 /* Complete events for now disabled pipes here. */ 8457 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) { 8458 spin_lock_irq(&dev->event_lock); 8459 drm_crtc_send_vblank_event(&crtc->base, 8460 new_crtc_state->uapi.event); 8461 spin_unlock_irq(&dev->event_lock); 8462 8463 new_crtc_state->uapi.event = NULL; 8464 } 8465 } 8466 8467 intel_encoders_update_prepare(state); 8468 8469 intel_dbuf_pre_plane_update(state); 8470 8471 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 8472 if (new_crtc_state->do_async_flip) 8473 intel_crtc_enable_flip_done(state, crtc); 8474 } 8475 8476 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 8477 dev_priv->display->commit_modeset_enables(state); 8478 8479 intel_encoders_update_complete(state); 8480 8481 if (state->modeset) 8482 intel_set_cdclk_post_plane_update(state); 8483 8484 intel_wait_for_vblank_workers(state); 8485 8486 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 8487 * already, but still need the state for the delayed optimization.
To 8488 * fix this: 8489 * - wrap the optimization/post_plane_update stuff into a per-crtc work. 8490 * - schedule that vblank worker _before_ calling hw_done 8491 * - at the start of commit_tail, cancel it _synchronously 8492 * - switch over to the vblank wait helper in the core after that since 8493 * we don't need our special handling any more. 8494 */ 8495 drm_atomic_helper_wait_for_flip_done(dev, &state->base); 8496 8497 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 8498 if (new_crtc_state->do_async_flip) 8499 intel_crtc_disable_flip_done(state, crtc); 8500 } 8501 8502 /* 8503 * Now that the vblank has passed, we can go ahead and program the 8504 * optimal watermarks on platforms that need two-step watermark 8505 * programming. 8506 * 8507 * TODO: Move this (and other cleanup) to an async worker eventually. 8508 */ 8509 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 8510 new_crtc_state, i) { 8511 /* 8512 * Gen2 reports pipe underruns whenever all planes are disabled. 8513 * So re-enable underrun reporting after some planes get enabled. 8514 * 8515 * We do this before .optimize_watermarks() so that we have a 8516 * chance of catching underruns with the intermediate watermarks 8517 * vs. the new plane configuration. 8518 */ 8519 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state)) 8520 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 8521 8522 intel_optimize_watermarks(state, crtc); 8523 } 8524 8525 intel_dbuf_post_plane_update(state); 8526 intel_psr_post_plane_update(state); 8527 8528 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 8529 intel_post_plane_update(state, crtc); 8530 8531 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]); 8532 8533 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); 8534 8535 /* 8536 * DSB cleanup is done in cleanup_work aligning with framebuffer 8537 * cleanup. So copy and reset the dsb structure to sync with 8538 * commit_done and later do dsb cleanup in cleanup_work. 8539 */ 8540 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb); 8541 } 8542 8543 /* Underruns don't always raise interrupts, so check manually */ 8544 intel_check_cpu_fifo_underruns(dev_priv); 8545 intel_check_pch_fifo_underruns(dev_priv); 8546 8547 if (state->modeset) 8548 intel_verify_planes(state); 8549 8550 intel_sagv_post_plane_update(state); 8551 8552 drm_atomic_helper_commit_hw_done(&state->base); 8553 8554 if (state->modeset) { 8555 /* As one of the primary mmio accessors, KMS has a high 8556 * likelihood of triggering bugs in unclaimed access. After we 8557 * finish modesetting, see if an error has been flagged, and if 8558 * so enable debugging for the next modeset - and hope we catch 8559 * the culprit. 8560 */ 8561 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 8562 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); 8563 } 8564 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 8565 8566 /* 8567 * Defer the cleanup of the old state to a separate worker so as not to 8568 * impede the current task (userspace, for blocking modesets) that is 8569 * executed inline. For out-of-line asynchronous modesets/flips, 8570 * deferring to a new worker seems overkill, but we would place a 8571 * schedule point (cond_resched()) here anyway to keep latencies 8572 * down.
8573 */ 8574 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 8575 queue_work(system_highpri_wq, &state->base.commit_work); 8576 } 8577 8578 static void intel_atomic_commit_work(struct work_struct *work) 8579 { 8580 struct intel_atomic_state *state = 8581 container_of(work, struct intel_atomic_state, base.commit_work); 8582 8583 intel_atomic_commit_tail(state); 8584 } 8585 8586 static int 8587 intel_atomic_commit_ready(struct i915_sw_fence *fence, 8588 enum i915_sw_fence_notify notify) 8589 { 8590 struct intel_atomic_state *state = 8591 container_of(fence, struct intel_atomic_state, commit_ready); 8592 8593 switch (notify) { 8594 case FENCE_COMPLETE: 8595 /* we do blocking waits in the worker, nothing to do here */ 8596 break; 8597 case FENCE_FREE: 8598 { 8599 struct intel_atomic_helper *helper = 8600 &to_i915(state->base.dev)->atomic_helper; 8601 8602 if (llist_add(&state->freed, &helper->free_list)) 8603 schedule_work(&helper->free_work); 8604 break; 8605 } 8606 } 8607 8608 return NOTIFY_DONE; 8609 } 8610 8611 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 8612 { 8613 struct intel_plane_state *old_plane_state, *new_plane_state; 8614 struct intel_plane *plane; 8615 int i; 8616 8617 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 8618 new_plane_state, i) 8619 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 8620 to_intel_frontbuffer(new_plane_state->hw.fb), 8621 plane->frontbuffer_bit); 8622 } 8623 8624 static int intel_atomic_commit(struct drm_device *dev, 8625 struct drm_atomic_state *_state, 8626 bool nonblock) 8627 { 8628 struct intel_atomic_state *state = to_intel_atomic_state(_state); 8629 struct drm_i915_private *dev_priv = to_i915(dev); 8630 int ret = 0; 8631 8632 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 8633 8634 drm_atomic_state_get(&state->base); 8635 i915_sw_fence_init(&state->commit_ready, 8636 intel_atomic_commit_ready); 8637 8638 /* 8639 * The intel_legacy_cursor_update() fast path takes care 8640 * of avoiding the vblank waits for simple cursor 8641 * movement and flips. For cursor on/off and size changes, 8642 * we want to perform the vblank waits so that watermark 8643 * updates happen during the correct frames. Gen9+ have 8644 * double buffered watermarks and so shouldn't need this. 8645 * 8646 * Unset state->legacy_cursor_update before the call to 8647 * drm_atomic_helper_setup_commit() because otherwise 8648 * drm_atomic_helper_wait_for_flip_done() is a noop and 8649 * we get FIFO underruns because we didn't wait 8650 * for vblank. 8651 * 8652 * FIXME doing watermarks and fb cleanup from a vblank worker 8653 * (assuming we had any) would solve these problems. 
8654 */ 8655 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) { 8656 struct intel_crtc_state *new_crtc_state; 8657 struct intel_crtc *crtc; 8658 int i; 8659 8660 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 8661 if (new_crtc_state->wm.need_postvbl_update || 8662 new_crtc_state->update_wm_post) 8663 state->base.legacy_cursor_update = false; 8664 } 8665 8666 ret = intel_atomic_prepare_commit(state); 8667 if (ret) { 8668 drm_dbg_atomic(&dev_priv->drm, 8669 "Preparing state failed with %i\n", ret); 8670 i915_sw_fence_commit(&state->commit_ready); 8671 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 8672 return ret; 8673 } 8674 8675 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 8676 if (!ret) 8677 ret = drm_atomic_helper_swap_state(&state->base, true); 8678 if (!ret) 8679 intel_atomic_swap_global_state(state); 8680 8681 if (ret) { 8682 struct intel_crtc_state *new_crtc_state; 8683 struct intel_crtc *crtc; 8684 int i; 8685 8686 i915_sw_fence_commit(&state->commit_ready); 8687 8688 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 8689 intel_dsb_cleanup(new_crtc_state); 8690 8691 drm_atomic_helper_cleanup_planes(dev, &state->base); 8692 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 8693 return ret; 8694 } 8695 intel_shared_dpll_swap_state(state); 8696 intel_atomic_track_fbs(state); 8697 8698 drm_atomic_state_get(&state->base); 8699 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 8700 8701 i915_sw_fence_commit(&state->commit_ready); 8702 if (nonblock && state->modeset) { 8703 queue_work(dev_priv->modeset_wq, &state->base.commit_work); 8704 } else if (nonblock) { 8705 queue_work(dev_priv->flip_wq, &state->base.commit_work); 8706 } else { 8707 if (state->modeset) 8708 flush_workqueue(dev_priv->modeset_wq); 8709 intel_atomic_commit_tail(state); 8710 } 8711 8712 return 0; 8713 } 8714 8715 /** 8716 * intel_plane_destroy - destroy a plane 8717 * @plane: plane to destroy 8718 * 8719 * Common destruction function for all types of planes (primary, cursor, 8720 * sprite). 
8721 */ 8722 void intel_plane_destroy(struct drm_plane *plane) 8723 { 8724 drm_plane_cleanup(plane); 8725 kfree(to_intel_plane(plane)); 8726 } 8727 8728 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) 8729 { 8730 struct intel_plane *plane; 8731 8732 for_each_intel_plane(&dev_priv->drm, plane) { 8733 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, 8734 plane->pipe); 8735 8736 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); 8737 } 8738 } 8739 8740 8741 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 8742 struct drm_file *file) 8743 { 8744 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 8745 struct drm_crtc *drmmode_crtc; 8746 struct intel_crtc *crtc; 8747 8748 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); 8749 if (!drmmode_crtc) 8750 return -ENOENT; 8751 8752 crtc = to_intel_crtc(drmmode_crtc); 8753 pipe_from_crtc_id->pipe = crtc->pipe; 8754 8755 return 0; 8756 } 8757 8758 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) 8759 { 8760 struct drm_device *dev = encoder->base.dev; 8761 struct intel_encoder *source_encoder; 8762 u32 possible_clones = 0; 8763 8764 for_each_intel_encoder(dev, source_encoder) { 8765 if (encoders_cloneable(encoder, source_encoder)) 8766 possible_clones |= drm_encoder_mask(&source_encoder->base); 8767 } 8768 8769 return possible_clones; 8770 } 8771 8772 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) 8773 { 8774 struct drm_device *dev = encoder->base.dev; 8775 struct intel_crtc *crtc; 8776 u32 possible_crtcs = 0; 8777 8778 for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask) 8779 possible_crtcs |= drm_crtc_mask(&crtc->base); 8780 8781 return possible_crtcs; 8782 } 8783 8784 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 8785 { 8786 if (!IS_MOBILE(dev_priv)) 8787 return false; 8788 8789 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0) 8790 return false; 8791 8792 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE)) 8793 return false; 8794 8795 return true; 8796 } 8797 8798 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 8799 { 8800 if (DISPLAY_VER(dev_priv) >= 9) 8801 return false; 8802 8803 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 8804 return false; 8805 8806 if (HAS_PCH_LPT_H(dev_priv) && 8807 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 8808 return false; 8809 8810 /* DDI E can't be used if DDI A requires 4 lanes */ 8811 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 8812 return false; 8813 8814 if (!dev_priv->vbt.int_crt_support) 8815 return false; 8816 8817 return true; 8818 } 8819 8820 static void intel_setup_outputs(struct drm_i915_private *dev_priv) 8821 { 8822 struct intel_encoder *encoder; 8823 bool dpd_is_edp = false; 8824 8825 intel_pps_unlock_regs_wa(dev_priv); 8826 8827 if (!HAS_DISPLAY(dev_priv)) 8828 return; 8829 8830 if (IS_DG2(dev_priv)) { 8831 intel_ddi_init(dev_priv, PORT_A); 8832 intel_ddi_init(dev_priv, PORT_B); 8833 intel_ddi_init(dev_priv, PORT_C); 8834 intel_ddi_init(dev_priv, PORT_D_XELPD); 8835 intel_ddi_init(dev_priv, PORT_TC1); 8836 } else if (IS_ALDERLAKE_P(dev_priv)) { 8837 intel_ddi_init(dev_priv, PORT_A); 8838 intel_ddi_init(dev_priv, PORT_B); 8839 intel_ddi_init(dev_priv, PORT_TC1); 8840 intel_ddi_init(dev_priv, PORT_TC2); 8841 intel_ddi_init(dev_priv, PORT_TC3); 8842 intel_ddi_init(dev_priv, PORT_TC4); 8843 icl_dsi_init(dev_priv); 8844 } else if 
(IS_ALDERLAKE_S(dev_priv)) { 8845 intel_ddi_init(dev_priv, PORT_A); 8846 intel_ddi_init(dev_priv, PORT_TC1); 8847 intel_ddi_init(dev_priv, PORT_TC2); 8848 intel_ddi_init(dev_priv, PORT_TC3); 8849 intel_ddi_init(dev_priv, PORT_TC4); 8850 } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) { 8851 intel_ddi_init(dev_priv, PORT_A); 8852 intel_ddi_init(dev_priv, PORT_B); 8853 intel_ddi_init(dev_priv, PORT_TC1); 8854 intel_ddi_init(dev_priv, PORT_TC2); 8855 } else if (DISPLAY_VER(dev_priv) >= 12) { 8856 intel_ddi_init(dev_priv, PORT_A); 8857 intel_ddi_init(dev_priv, PORT_B); 8858 intel_ddi_init(dev_priv, PORT_TC1); 8859 intel_ddi_init(dev_priv, PORT_TC2); 8860 intel_ddi_init(dev_priv, PORT_TC3); 8861 intel_ddi_init(dev_priv, PORT_TC4); 8862 intel_ddi_init(dev_priv, PORT_TC5); 8863 intel_ddi_init(dev_priv, PORT_TC6); 8864 icl_dsi_init(dev_priv); 8865 } else if (IS_JSL_EHL(dev_priv)) { 8866 intel_ddi_init(dev_priv, PORT_A); 8867 intel_ddi_init(dev_priv, PORT_B); 8868 intel_ddi_init(dev_priv, PORT_C); 8869 intel_ddi_init(dev_priv, PORT_D); 8870 icl_dsi_init(dev_priv); 8871 } else if (DISPLAY_VER(dev_priv) == 11) { 8872 intel_ddi_init(dev_priv, PORT_A); 8873 intel_ddi_init(dev_priv, PORT_B); 8874 intel_ddi_init(dev_priv, PORT_C); 8875 intel_ddi_init(dev_priv, PORT_D); 8876 intel_ddi_init(dev_priv, PORT_E); 8877 intel_ddi_init(dev_priv, PORT_F); 8878 icl_dsi_init(dev_priv); 8879 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 8880 intel_ddi_init(dev_priv, PORT_A); 8881 intel_ddi_init(dev_priv, PORT_B); 8882 intel_ddi_init(dev_priv, PORT_C); 8883 vlv_dsi_init(dev_priv); 8884 } else if (DISPLAY_VER(dev_priv) >= 9) { 8885 intel_ddi_init(dev_priv, PORT_A); 8886 intel_ddi_init(dev_priv, PORT_B); 8887 intel_ddi_init(dev_priv, PORT_C); 8888 intel_ddi_init(dev_priv, PORT_D); 8889 intel_ddi_init(dev_priv, PORT_E); 8890 } else if (HAS_DDI(dev_priv)) { 8891 u32 found; 8892 8893 if (intel_ddi_crt_present(dev_priv)) 8894 intel_crt_init(dev_priv); 8895 8896 /* Haswell uses DDI functions to detect digital outputs. */ 8897 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 8898 if (found) 8899 intel_ddi_init(dev_priv, PORT_A); 8900 8901 found = intel_de_read(dev_priv, SFUSE_STRAP); 8902 if (found & SFUSE_STRAP_DDIB_DETECTED) 8903 intel_ddi_init(dev_priv, PORT_B); 8904 if (found & SFUSE_STRAP_DDIC_DETECTED) 8905 intel_ddi_init(dev_priv, PORT_C); 8906 if (found & SFUSE_STRAP_DDID_DETECTED) 8907 intel_ddi_init(dev_priv, PORT_D); 8908 if (found & SFUSE_STRAP_DDIF_DETECTED) 8909 intel_ddi_init(dev_priv, PORT_F); 8910 } else if (HAS_PCH_SPLIT(dev_priv)) { 8911 int found; 8912 8913 /* 8914 * intel_edp_init_connector() depends on this completing first, 8915 * to prevent the registration of both eDP and LVDS and the 8916 * incorrect sharing of the PPS. 
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares, as we've seen at least
		 * HDMI ports that the VBT claims are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}

static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time, at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list
	 * (see the sketch below this function).
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (DISPLAY_VER(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
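/*
 * Illustrative sketch, not driver code: a connector's ->mode_valid()
 * hook enforcing the DBLSCAN policy described above would reject such
 * modes before they ever land on the mode list, roughly:
 *
 *	static enum drm_mode_status
 *	example_connector_mode_valid(struct drm_connector *connector,
 *				     struct drm_display_mode *mode)
 *	{
 *		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 *			return MODE_NO_DBLESCAN;
 *
 *		return MODE_OK;
 *	}
 *
 * (example_connector_mode_valid() is a hypothetical name; the real
 * per-connector hooks live in the respective output code.)
 */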
enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode,
				bool bigjoiner)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
	 */
	if (DISPLAY_VER(dev_priv) >= 11) {
		plane_width_max = 5120 << bigjoiner;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}
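/*
 * Worked example of the limits above: on DISPLAY_VER >= 11 with
 * bigjoiner available, the width limit doubles, 5120 << 1 == 10240
 * pixels (two pipes driving one plane side by side), while the height
 * limit stays at 4320. Without bigjoiner it remains 5120x4320.
 */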
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

static const struct drm_i915_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};

static const struct drm_i915_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

static const struct drm_i915_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

static const struct drm_i915_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

static const struct drm_i915_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_init_cdclk_hooks(dev_priv);
	intel_audio_hooks_init(dev_priv);

	intel_dpll_init_clock_hook(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 9) {
		dev_priv->display = &skl_display_funcs;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display = &ddi_display_funcs;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display = &pch_split_display_funcs;
	} else if (IS_CHERRYVIEW(dev_priv) ||
		   IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display = &vlv_display_funcs;
	} else {
		dev_priv->display = &i9xx_display_funcs;
	}

	intel_fdi_init_hook(dev_priv);
}

void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state;

	if (!HAS_DISPLAY(i915))
		return;

	cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);

	intel_update_cdclk(i915);
	intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK");
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
}

static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_crtc(state->dev, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (crtc_state->hw.active) {
			/*
			 * Preserve the inherited flag to avoid
			 * taking the full modeset path.
			 */
			crtc_state->inherited = true;
		}
	}

	drm_for_each_plane(plane, state->dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->wm_disp->optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		intel_optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements. This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform. Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
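/*
 * A note on the retry/-EDEADLK dance above: it is the standard
 * drm_modeset_acquire idiom, used again further below. As a minimal
 * sketch (some_locked_operation() is a hypothetical placeholder):
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *	ret = some_locked_operation(&ctx);
 *	if (ret == -EDEADLK) {
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 *
 * drm_modeset_backoff() drops the contended locks and blocks until the
 * blocking lock is free, so the retry cannot livelock.
 */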
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio, infoframes, etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static void intel_mode_config_init(struct drm_i915_private *i915)
{
	struct drm_mode_config *mode_config = &i915->drm.mode_config;

	drm_mode_config_init(&i915->drm);
	INIT_LIST_HEAD(&i915->global_obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (DISPLAY_VER(i915) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(i915) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(i915) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	if (IS_I845G(i915) || IS_I865G(i915)) {
		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (IS_I830(i915) || IS_I85X(i915) ||
		   IS_I915G(i915) || IS_I915GM(i915)) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}

static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}

/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_ucode_init(i915);

	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	i915->window2_delay = 0; /* No DSB so no window2 delay */

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_dmc:
	intel_dmc_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}

/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
"s" : ""); 9636 9637 for_each_pipe(i915, pipe) { 9638 ret = intel_crtc_init(i915, pipe); 9639 if (ret) { 9640 intel_mode_config_cleanup(i915); 9641 return ret; 9642 } 9643 } 9644 9645 intel_plane_possible_crtcs_init(i915); 9646 intel_shared_dpll_init(dev); 9647 intel_fdi_pll_freq_update(i915); 9648 9649 intel_update_czclk(i915); 9650 intel_modeset_init_hw(i915); 9651 intel_dpll_update_ref_clks(i915); 9652 9653 intel_hdcp_component_init(i915); 9654 9655 if (i915->max_cdclk_freq == 0) 9656 intel_update_max_cdclk(i915); 9657 9658 /* 9659 * If the platform has HTI, we need to find out whether it has reserved 9660 * any display resources before we create our display outputs. 9661 */ 9662 if (INTEL_INFO(i915)->display.has_hti) 9663 i915->hti_state = intel_de_read(i915, HDPORT_STATE); 9664 9665 /* Just disable it once at startup */ 9666 intel_vga_disable(i915); 9667 intel_setup_outputs(i915); 9668 9669 drm_modeset_lock_all(dev); 9670 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 9671 intel_acpi_assign_connector_fwnodes(i915); 9672 drm_modeset_unlock_all(dev); 9673 9674 for_each_intel_crtc(dev, crtc) { 9675 if (!to_intel_crtc_state(crtc->base.state)->uapi.active) 9676 continue; 9677 intel_crtc_initial_plane_config(crtc); 9678 } 9679 9680 /* 9681 * Make sure hardware watermarks really match the state we read out. 9682 * Note that we need to do this after reconstructing the BIOS fb's 9683 * since the watermark calculation done here will use pstate->fb. 9684 */ 9685 if (!HAS_GMCH(i915)) 9686 sanitize_watermarks(i915); 9687 9688 return 0; 9689 } 9690 9691 /* part #3: call after gem init */ 9692 int intel_modeset_init(struct drm_i915_private *i915) 9693 { 9694 int ret; 9695 9696 if (!HAS_DISPLAY(i915)) 9697 return 0; 9698 9699 /* 9700 * Force all active planes to recompute their states. So that on 9701 * mode_setcrtc after probe, all the intel_plane_state variables 9702 * are already calculated and there is no assert_plane warnings 9703 * during bootup. 9704 */ 9705 ret = intel_initial_commit(&i915->drm); 9706 if (ret) 9707 drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret); 9708 9709 intel_overlay_setup(i915); 9710 9711 ret = intel_fbdev_init(&i915->drm); 9712 if (ret) 9713 return ret; 9714 9715 /* Only enable hotplug handling once the fbdev is fully set up. 
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);
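	/*
	 * Worked out, assuming the usual i9xx divider conventions
	 * (m = 5 * (m1 + 2) + (m2 + 2), effective n = n + 2, p = p1 * p2):
	 *
	 *	m   = 5 * 20 + 9         = 109
	 *	vco = 48000 * 109 / 4    = 1308000 kHz
	 *	dot = 1308000 / (13 * 4) = 25154 kHz (rounded)
	 *
	 * which is the value checked above, close enough to the nominal
	 * 25175 kHz VGA pixel clock.
	 */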
	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	if (DISPLAY_VER(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		if (!plane->get_hw_state(plane, &pipe))
			continue;

		if (pipe == crtc->pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			    plane->base.base.id, plane->base.name);

		plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}

static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}

static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
			       enum pipe pch_transcoder)
{
	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
}

static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}
}

static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/* Disable any background color/etc. set by the BIOS */
		intel_color_commit(crtc_state);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc.
		 * LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_transcoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}

static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/*
	 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
	 * the hardware when a high res display is plugged in. The DPLL P
	 * divider is zero, and the pipe timings are bonkers. We'll
	 * try to disable everything in that case.
	 *
	 * FIXME would be nice to be able to sanitize this state
	 * without several WARNs, but for now let's take the easy
	 * road.
	 */
	return IS_SANDYBRIDGE(dev_priv) &&
		crtc_state->hw.active &&
		crtc_state->shared_dpll &&
		crtc_state->port_clock == 0;
}

static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (HAS_DDI(dev_priv))
		intel_ddi_sanitize_encoder_pll_mapping(encoder);
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    enableddisabled(visible), pipe_name(pipe));
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_plane_bitmasks(crtc_state);
	}
}

static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		struct intel_crtc_state *crtc_state = NULL;

		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = intel_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner) {
				struct intel_crtc *slave_crtc;

				/* encoder should be linked to the bigjoiner master */
				WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));

				for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
								 intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
					struct intel_crtc_state *slave_crtc_state;

					slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state);
					intel_encoder_get_config(encoder, slave_crtc_state);
				}
			}
		} else {
			encoder->base.crtc = NULL;
		}

		if (encoder->sync_state)
			encoder->sync_state(encoder, crtc_state);

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	intel_dpll_readout_hw_state(dev_priv);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}

static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_crtc_state *crtc_state;

		if (!encoder->get_power_domains)
			continue;

		/*
		 * MST-primary and inactive encoders don't have a crtc state
		 * and neither of these require any power domain references.
		 */
		if (!encoder->base.crtc)
			continue;

		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
		encoder->get_power_domains(encoder, crtc_state);
	}
}

static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_DISPLAY_VER(dev_priv, 10, 12))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}

static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
				       enum port port, i915_reg_t hdmi_reg)
{
	u32 val = intel_de_read(dev_priv, hdmi_reg);

	if (val & SDVO_ENABLE ||
	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "Sanitizing transcoder select for HDMI %c\n",
		    port_name(port));

	val &= ~SDVO_PIPE_SEL_MASK;
	val |= SDVO_PIPE_SEL(PIPE_A);

	intel_de_write(dev_priv, hdmi_reg, val);
}

static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
				     enum port port, i915_reg_t dp_reg)
{
	u32 val = intel_de_read(dev_priv, dp_reg);

	if (val & DP_PORT_EN ||
	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "Sanitizing transcoder select for DP %c\n",
		    port_name(port));

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(PIPE_A);

	intel_de_write(dev_priv, dp_reg, val);
}

static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}

/*
 * Scan out the current hw modeset state and sanitize it to the
 * current state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
		skl_wm_sanitize(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_power_domains_sanitize_state(dev_priv);
}

void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}

/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup().
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup(i915);
}

/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_dmc_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}

bool intel_modeset_probe_defer(struct pci_dev *pdev)
{
	struct drm_privacy_screen *privacy_screen;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
	 */
	if (vga_switcheroo_client_probe_defer(pdev))
		return true;

	/* If the LCD panel has a privacy-screen, wait for it */
	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
		return true;

	drm_privacy_screen_put(privacy_screen);

	return false;
}

void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45. Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}

void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}