1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <acpi/video.h> 28 #include <linux/i2c.h> 29 #include <linux/input.h> 30 #include <linux/kernel.h> 31 #include <linux/module.h> 32 #include <linux/dma-resv.h> 33 #include <linux/slab.h> 34 #include <linux/string_helpers.h> 35 #include <linux/vga_switcheroo.h> 36 37 #include <drm/display/drm_dp_helper.h> 38 #include <drm/drm_atomic.h> 39 #include <drm/drm_atomic_helper.h> 40 #include <drm/drm_atomic_uapi.h> 41 #include <drm/drm_damage_helper.h> 42 #include <drm/drm_edid.h> 43 #include <drm/drm_fourcc.h> 44 #include <drm/drm_privacy_screen_consumer.h> 45 #include <drm/drm_probe_helper.h> 46 #include <drm/drm_rect.h> 47 48 #include "display/intel_audio.h" 49 #include "display/intel_crt.h" 50 #include "display/intel_ddi.h" 51 #include "display/intel_display_debugfs.h" 52 #include "display/intel_display_power.h" 53 #include "display/intel_dp.h" 54 #include "display/intel_dp_mst.h" 55 #include "display/intel_dpll.h" 56 #include "display/intel_dpll_mgr.h" 57 #include "display/intel_drrs.h" 58 #include "display/intel_dsi.h" 59 #include "display/intel_dvo.h" 60 #include "display/intel_fb.h" 61 #include "display/intel_gmbus.h" 62 #include "display/intel_hdmi.h" 63 #include "display/intel_lvds.h" 64 #include "display/intel_sdvo.h" 65 #include "display/intel_snps_phy.h" 66 #include "display/intel_tv.h" 67 #include "display/intel_vdsc.h" 68 #include "display/intel_vrr.h" 69 70 #include "gem/i915_gem_lmem.h" 71 #include "gem/i915_gem_object.h" 72 73 #include "gt/gen8_ppgtt.h" 74 75 #include "g4x_dp.h" 76 #include "g4x_hdmi.h" 77 #include "hsw_ips.h" 78 #include "i915_drv.h" 79 #include "i915_utils.h" 80 #include "icl_dsi.h" 81 #include "intel_acpi.h" 82 #include "intel_atomic.h" 83 #include "intel_atomic_plane.h" 84 #include "intel_bw.h" 85 #include "intel_cdclk.h" 86 #include "intel_color.h" 87 #include "intel_crtc.h" 88 #include "intel_crtc_state_dump.h" 89 #include "intel_de.h" 90 #include "intel_display_types.h" 91 #include "intel_dmc.h" 92 #include "intel_dp_link_training.h" 93 #include "intel_dpt.h" 94 #include "intel_fbc.h" 95 #include "intel_fbdev.h" 96 #include "intel_fdi.h" 97 #include "intel_fifo_underrun.h" 98 #include "intel_frontbuffer.h" 99 #include "intel_hdcp.h" 100 #include "intel_hotplug.h" 101 #include "intel_modeset_verify.h" 102 #include "intel_modeset_setup.h" 103 #include "intel_overlay.h" 104 #include "intel_panel.h" 105 #include "intel_pch_display.h" 106 #include "intel_pch_refclk.h" 107 #include "intel_pcode.h" 108 #include "intel_pipe_crc.h" 109 #include "intel_plane_initial.h" 110 #include "intel_pm.h" 111 #include "intel_pps.h" 112 #include "intel_psr.h" 113 #include "intel_quirks.h" 114 #include "intel_sprite.h" 115 #include "intel_tc.h" 116 #include "intel_vga.h" 117 #include "i9xx_plane.h" 118 #include "skl_scaler.h" 119 #include "skl_universal_plane.h" 120 #include "vlv_dsi.h" 121 #include "vlv_dsi_pll.h" 122 #include "vlv_dsi_regs.h" 123 #include "vlv_sideband.h" 124 125 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); 126 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); 127 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); 128 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state); 129 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); 130 131 /** 132 * intel_update_watermarks - update FIFO watermark values based on current modes 133 * @dev_priv: i915 device 134 * 135 * Calculate watermark values for the various WM regs based on current mode 136 * and plane configuration. 137 * 138 * There are several cases to deal with here: 139 * - normal (i.e. non-self-refresh) 140 * - self-refresh (SR) mode 141 * - lines are large relative to FIFO size (buffer can hold up to 2) 142 * - lines are small relative to FIFO size (buffer can hold more than 2 143 * lines), so need to account for TLB latency 144 * 145 * The normal calculation is: 146 * watermark = dotclock * bytes per pixel * latency 147 * where latency is platform & configuration dependent (we assume pessimal 148 * values here). 149 * 150 * The SR calculation is: 151 * watermark = (trunc(latency/line time)+1) * surface width * 152 * bytes per pixel 153 * where 154 * line time = htotal / dotclock 155 * surface width = hdisplay for normal plane and 64 for cursor 156 * and latency is assumed to be high, as above. 157 * 158 * The final value programmed to the register should always be rounded up, 159 * and include an extra 2 entries to account for clock crossings. 160 * 161 * We don't use the sprite, so we can ignore that. And on Crestline we have 162 * to set the non-SR watermarks to 8. 163 */ 164 void intel_update_watermarks(struct drm_i915_private *dev_priv) 165 { 166 if (dev_priv->wm_disp->update_wm) 167 dev_priv->wm_disp->update_wm(dev_priv); 168 } 169 170 static int intel_compute_pipe_wm(struct intel_atomic_state *state, 171 struct intel_crtc *crtc) 172 { 173 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 174 if (dev_priv->wm_disp->compute_pipe_wm) 175 return dev_priv->wm_disp->compute_pipe_wm(state, crtc); 176 return 0; 177 } 178 179 static int intel_compute_intermediate_wm(struct intel_atomic_state *state, 180 struct intel_crtc *crtc) 181 { 182 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 183 if (!dev_priv->wm_disp->compute_intermediate_wm) 184 return 0; 185 if (drm_WARN_ON(&dev_priv->drm, 186 !dev_priv->wm_disp->compute_pipe_wm)) 187 return 0; 188 return dev_priv->wm_disp->compute_intermediate_wm(state, crtc); 189 } 190 191 static bool intel_initial_watermarks(struct intel_atomic_state *state, 192 struct intel_crtc *crtc) 193 { 194 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 195 if (dev_priv->wm_disp->initial_watermarks) { 196 dev_priv->wm_disp->initial_watermarks(state, crtc); 197 return true; 198 } 199 return false; 200 } 201 202 static void intel_atomic_update_watermarks(struct intel_atomic_state *state, 203 struct intel_crtc *crtc) 204 { 205 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 206 if (dev_priv->wm_disp->atomic_update_watermarks) 207 dev_priv->wm_disp->atomic_update_watermarks(state, crtc); 208 } 209 210 static void intel_optimize_watermarks(struct intel_atomic_state *state, 211 struct intel_crtc *crtc) 212 { 213 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 214 if (dev_priv->wm_disp->optimize_watermarks) 215 dev_priv->wm_disp->optimize_watermarks(state, crtc); 216 } 217 218 static int intel_compute_global_watermarks(struct intel_atomic_state *state) 219 { 220 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 221 if (dev_priv->wm_disp->compute_global_watermarks) 222 return dev_priv->wm_disp->compute_global_watermarks(state); 223 return 0; 224 } 225 226 /* returns HPLL frequency in kHz */ 227 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) 228 { 229 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 230 231 /* Obtain SKU information */ 232 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 233 CCK_FUSE_HPLL_FREQ_MASK; 234 235 return vco_freq[hpll_freq] * 1000; 236 } 237 238 int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 239 const char *name, u32 reg, int ref_freq) 240 { 241 u32 val; 242 int divider; 243 244 val = vlv_cck_read(dev_priv, reg); 245 divider = val & CCK_FREQUENCY_VALUES; 246 247 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) != 248 (divider << CCK_FREQUENCY_STATUS_SHIFT), 249 "%s change in progress\n", name); 250 251 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); 252 } 253 254 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 255 const char *name, u32 reg) 256 { 257 int hpll; 258 259 vlv_cck_get(dev_priv); 260 261 if (dev_priv->hpll_freq == 0) 262 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv); 263 264 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq); 265 266 vlv_cck_put(dev_priv); 267 268 return hpll; 269 } 270 271 static void intel_update_czclk(struct drm_i915_private *dev_priv) 272 { 273 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) 274 return; 275 276 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 277 CCK_CZ_CLOCK_CONTROL); 278 279 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n", 280 dev_priv->czclk_freq); 281 } 282 283 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state) 284 { 285 return (crtc_state->active_planes & 286 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0; 287 } 288 289 /* WA Display #0827: Gen9:all */ 290 static void 291 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) 292 { 293 if (enable) 294 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 295 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS); 296 else 297 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 298 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS)); 299 } 300 301 /* Wa_2006604312:icl,ehl */ 302 static void 303 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 304 bool enable) 305 { 306 if (enable) 307 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 308 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS); 309 else 310 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 311 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS); 312 } 313 314 /* Wa_1604331009:icl,jsl,ehl */ 315 static void 316 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 317 bool enable) 318 { 319 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS, 320 enable ? CURSOR_GATING_DIS : 0); 321 } 322 323 static bool 324 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) 325 { 326 return crtc_state->master_transcoder != INVALID_TRANSCODER; 327 } 328 329 static bool 330 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) 331 { 332 return crtc_state->sync_mode_slaves_mask != 0; 333 } 334 335 bool 336 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) 337 { 338 return is_trans_port_sync_master(crtc_state) || 339 is_trans_port_sync_slave(crtc_state); 340 } 341 342 static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state) 343 { 344 return ffs(crtc_state->bigjoiner_pipes) - 1; 345 } 346 347 u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state) 348 { 349 if (crtc_state->bigjoiner_pipes) 350 return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state)); 351 else 352 return 0; 353 } 354 355 bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state) 356 { 357 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 358 359 return crtc_state->bigjoiner_pipes && 360 crtc->pipe != bigjoiner_master_pipe(crtc_state); 361 } 362 363 bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state) 364 { 365 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 366 367 return crtc_state->bigjoiner_pipes && 368 crtc->pipe == bigjoiner_master_pipe(crtc_state); 369 } 370 371 static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state) 372 { 373 return hweight8(crtc_state->bigjoiner_pipes); 374 } 375 376 struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state) 377 { 378 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 379 380 if (intel_crtc_is_bigjoiner_slave(crtc_state)) 381 return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state)); 382 else 383 return to_intel_crtc(crtc_state->uapi.crtc); 384 } 385 386 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, 387 enum pipe pipe) 388 { 389 i915_reg_t reg = PIPEDSL(pipe); 390 u32 line1, line2; 391 392 line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK; 393 msleep(5); 394 line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK; 395 396 return line1 != line2; 397 } 398 399 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) 400 { 401 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 402 enum pipe pipe = crtc->pipe; 403 404 /* Wait for the display line to settle/start moving */ 405 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) 406 drm_err(&dev_priv->drm, 407 "pipe %c scanline %s wait timed out\n", 408 pipe_name(pipe), str_on_off(state)); 409 } 410 411 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc) 412 { 413 wait_for_pipe_scanline_moving(crtc, false); 414 } 415 416 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) 417 { 418 wait_for_pipe_scanline_moving(crtc, true); 419 } 420 421 static void 422 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) 423 { 424 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 425 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 426 427 if (DISPLAY_VER(dev_priv) >= 4) { 428 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 429 430 /* Wait for the Pipe State to go off */ 431 if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder), 432 PIPECONF_STATE_ENABLE, 100)) 433 drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n"); 434 } else { 435 intel_wait_for_pipe_scanline_stopped(crtc); 436 } 437 } 438 439 void assert_transcoder(struct drm_i915_private *dev_priv, 440 enum transcoder cpu_transcoder, bool state) 441 { 442 bool cur_state; 443 enum intel_display_power_domain power_domain; 444 intel_wakeref_t wakeref; 445 446 /* we keep both pipes enabled on 830 */ 447 if (IS_I830(dev_priv)) 448 state = true; 449 450 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 451 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 452 if (wakeref) { 453 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); 454 cur_state = !!(val & PIPECONF_ENABLE); 455 456 intel_display_power_put(dev_priv, power_domain, wakeref); 457 } else { 458 cur_state = false; 459 } 460 461 I915_STATE_WARN(cur_state != state, 462 "transcoder %s assertion failure (expected %s, current %s)\n", 463 transcoder_name(cpu_transcoder), 464 str_on_off(state), str_on_off(cur_state)); 465 } 466 467 static void assert_plane(struct intel_plane *plane, bool state) 468 { 469 enum pipe pipe; 470 bool cur_state; 471 472 cur_state = plane->get_hw_state(plane, &pipe); 473 474 I915_STATE_WARN(cur_state != state, 475 "%s assertion failure (expected %s, current %s)\n", 476 plane->base.name, str_on_off(state), 477 str_on_off(cur_state)); 478 } 479 480 #define assert_plane_enabled(p) assert_plane(p, true) 481 #define assert_plane_disabled(p) assert_plane(p, false) 482 483 static void assert_planes_disabled(struct intel_crtc *crtc) 484 { 485 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 486 struct intel_plane *plane; 487 488 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) 489 assert_plane_disabled(plane); 490 } 491 492 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 493 struct intel_digital_port *dig_port, 494 unsigned int expected_mask) 495 { 496 u32 port_mask; 497 i915_reg_t dpll_reg; 498 499 switch (dig_port->base.port) { 500 default: 501 MISSING_CASE(dig_port->base.port); 502 fallthrough; 503 case PORT_B: 504 port_mask = DPLL_PORTB_READY_MASK; 505 dpll_reg = DPLL(0); 506 break; 507 case PORT_C: 508 port_mask = DPLL_PORTC_READY_MASK; 509 dpll_reg = DPLL(0); 510 expected_mask <<= 4; 511 break; 512 case PORT_D: 513 port_mask = DPLL_PORTD_READY_MASK; 514 dpll_reg = DPIO_PHY_STATUS; 515 break; 516 } 517 518 if (intel_de_wait_for_register(dev_priv, dpll_reg, 519 port_mask, expected_mask, 1000)) 520 drm_WARN(&dev_priv->drm, 1, 521 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", 522 dig_port->base.base.base.id, dig_port->base.base.name, 523 intel_de_read(dev_priv, dpll_reg) & port_mask, 524 expected_mask); 525 } 526 527 void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state) 528 { 529 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 530 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 531 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 532 enum pipe pipe = crtc->pipe; 533 i915_reg_t reg; 534 u32 val; 535 536 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe)); 537 538 assert_planes_disabled(crtc); 539 540 /* 541 * A pipe without a PLL won't actually be able to drive bits from 542 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 543 * need the check. 544 */ 545 if (HAS_GMCH(dev_priv)) { 546 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) 547 assert_dsi_pll_enabled(dev_priv); 548 else 549 assert_pll_enabled(dev_priv, pipe); 550 } else { 551 if (new_crtc_state->has_pch_encoder) { 552 /* if driving the PCH, we need FDI enabled */ 553 assert_fdi_rx_pll_enabled(dev_priv, 554 intel_crtc_pch_transcoder(crtc)); 555 assert_fdi_tx_pll_enabled(dev_priv, 556 (enum pipe) cpu_transcoder); 557 } 558 /* FIXME: assert CPU port conditions for SNB+ */ 559 } 560 561 /* Wa_22012358565:adl-p */ 562 if (DISPLAY_VER(dev_priv) == 13) 563 intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe), 564 0, PIPE_ARB_USE_PROG_SLOTS); 565 566 reg = PIPECONF(cpu_transcoder); 567 val = intel_de_read(dev_priv, reg); 568 if (val & PIPECONF_ENABLE) { 569 /* we keep both pipes enabled on 830 */ 570 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv)); 571 return; 572 } 573 574 intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE); 575 intel_de_posting_read(dev_priv, reg); 576 577 /* 578 * Until the pipe starts PIPEDSL reads will return a stale value, 579 * which causes an apparent vblank timestamp jump when PIPEDSL 580 * resets to its proper value. That also messes up the frame count 581 * when it's derived from the timestamps. So let's wait for the 582 * pipe to start properly before we call drm_crtc_vblank_on() 583 */ 584 if (intel_crtc_max_vblank_count(new_crtc_state) == 0) 585 intel_wait_for_pipe_scanline_moving(crtc); 586 } 587 588 void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) 589 { 590 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 591 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 592 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 593 enum pipe pipe = crtc->pipe; 594 i915_reg_t reg; 595 u32 val; 596 597 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe)); 598 599 /* 600 * Make sure planes won't keep trying to pump pixels to us, 601 * or we might hang the display. 602 */ 603 assert_planes_disabled(crtc); 604 605 reg = PIPECONF(cpu_transcoder); 606 val = intel_de_read(dev_priv, reg); 607 if ((val & PIPECONF_ENABLE) == 0) 608 return; 609 610 /* 611 * Double wide has implications for planes 612 * so best keep it disabled when not needed. 613 */ 614 if (old_crtc_state->double_wide) 615 val &= ~PIPECONF_DOUBLE_WIDE; 616 617 /* Don't disable pipe or pipe PLLs if needed */ 618 if (!IS_I830(dev_priv)) 619 val &= ~PIPECONF_ENABLE; 620 621 if (DISPLAY_VER(dev_priv) >= 12) 622 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 623 FECSTALL_DIS_DPTSTREAM_DPTTG, 0); 624 625 intel_de_write(dev_priv, reg, val); 626 if ((val & PIPECONF_ENABLE) == 0) 627 intel_wait_for_pipe_off(old_crtc_state); 628 } 629 630 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 631 { 632 unsigned int size = 0; 633 int i; 634 635 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 636 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width; 637 638 return size; 639 } 640 641 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info) 642 { 643 unsigned int size = 0; 644 int i; 645 646 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { 647 unsigned int plane_size; 648 649 if (rem_info->plane[i].linear) 650 plane_size = rem_info->plane[i].size; 651 else 652 plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height; 653 654 if (plane_size == 0) 655 continue; 656 657 if (rem_info->plane_alignment) 658 size = ALIGN(size, rem_info->plane_alignment); 659 660 size += plane_size; 661 } 662 663 return size; 664 } 665 666 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) 667 { 668 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 669 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 670 671 return DISPLAY_VER(dev_priv) < 4 || 672 (plane->fbc && 673 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL); 674 } 675 676 /* 677 * Convert the x/y offsets into a linear offset. 678 * Only valid with 0/180 degree rotation, which is fine since linear 679 * offset is only used with linear buffers on pre-hsw and tiled buffers 680 * with gen2/3, and 90/270 degree rotations isn't supported on any of them. 681 */ 682 u32 intel_fb_xy_to_linear(int x, int y, 683 const struct intel_plane_state *state, 684 int color_plane) 685 { 686 const struct drm_framebuffer *fb = state->hw.fb; 687 unsigned int cpp = fb->format->cpp[color_plane]; 688 unsigned int pitch = state->view.color_plane[color_plane].mapping_stride; 689 690 return y * pitch + x * cpp; 691 } 692 693 /* 694 * Add the x/y offsets derived from fb->offsets[] to the user 695 * specified plane src x/y offsets. The resulting x/y offsets 696 * specify the start of scanout from the beginning of the gtt mapping. 697 */ 698 void intel_add_fb_offsets(int *x, int *y, 699 const struct intel_plane_state *state, 700 int color_plane) 701 702 { 703 *x += state->view.color_plane[color_plane].x; 704 *y += state->view.color_plane[color_plane].y; 705 } 706 707 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, 708 u32 pixel_format, u64 modifier) 709 { 710 struct intel_crtc *crtc; 711 struct intel_plane *plane; 712 713 if (!HAS_DISPLAY(dev_priv)) 714 return 0; 715 716 /* 717 * We assume the primary plane for pipe A has 718 * the highest stride limits of them all, 719 * if in case pipe A is disabled, use the first pipe from pipe_mask. 720 */ 721 crtc = intel_first_crtc(dev_priv); 722 if (!crtc) 723 return 0; 724 725 plane = to_intel_plane(crtc->base.primary); 726 727 return plane->max_stride(plane, pixel_format, modifier, 728 DRM_MODE_ROTATE_0); 729 } 730 731 void intel_set_plane_visible(struct intel_crtc_state *crtc_state, 732 struct intel_plane_state *plane_state, 733 bool visible) 734 { 735 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 736 737 plane_state->uapi.visible = visible; 738 739 if (visible) 740 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base); 741 else 742 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base); 743 } 744 745 void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state) 746 { 747 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 748 struct drm_plane *plane; 749 750 /* 751 * Active_planes aliases if multiple "primary" or cursor planes 752 * have been used on the same (or wrong) pipe. plane_mask uses 753 * unique ids, hence we can use that to reconstruct active_planes. 754 */ 755 crtc_state->enabled_planes = 0; 756 crtc_state->active_planes = 0; 757 758 drm_for_each_plane_mask(plane, &dev_priv->drm, 759 crtc_state->uapi.plane_mask) { 760 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id); 761 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); 762 } 763 } 764 765 void intel_plane_disable_noatomic(struct intel_crtc *crtc, 766 struct intel_plane *plane) 767 { 768 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 769 struct intel_crtc_state *crtc_state = 770 to_intel_crtc_state(crtc->base.state); 771 struct intel_plane_state *plane_state = 772 to_intel_plane_state(plane->base.state); 773 774 drm_dbg_kms(&dev_priv->drm, 775 "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", 776 plane->base.base.id, plane->base.name, 777 crtc->base.base.id, crtc->base.name); 778 779 intel_set_plane_visible(crtc_state, plane_state, false); 780 intel_plane_fixup_bitmasks(crtc_state); 781 crtc_state->data_rate[plane->id] = 0; 782 crtc_state->data_rate_y[plane->id] = 0; 783 crtc_state->rel_data_rate[plane->id] = 0; 784 crtc_state->rel_data_rate_y[plane->id] = 0; 785 crtc_state->min_cdclk[plane->id] = 0; 786 787 if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 && 788 hsw_ips_disable(crtc_state)) { 789 crtc_state->ips_enabled = false; 790 intel_crtc_wait_for_next_vblank(crtc); 791 } 792 793 /* 794 * Vblank time updates from the shadow to live plane control register 795 * are blocked if the memory self-refresh mode is active at that 796 * moment. So to make sure the plane gets truly disabled, disable 797 * first the self-refresh mode. The self-refresh enable bit in turn 798 * will be checked/applied by the HW only at the next frame start 799 * event which is after the vblank start event, so we need to have a 800 * wait-for-vblank between disabling the plane and the pipe. 801 */ 802 if (HAS_GMCH(dev_priv) && 803 intel_set_memory_cxsr(dev_priv, false)) 804 intel_crtc_wait_for_next_vblank(crtc); 805 806 /* 807 * Gen2 reports pipe underruns whenever all planes are disabled. 808 * So disable underrun reporting before all the planes get disabled. 809 */ 810 if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes) 811 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 812 813 intel_plane_disable_arm(plane, crtc_state); 814 intel_crtc_wait_for_next_vblank(crtc); 815 } 816 817 unsigned int 818 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state) 819 { 820 int x = 0, y = 0; 821 822 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 823 plane_state->view.color_plane[0].offset, 0); 824 825 return y; 826 } 827 828 static int 829 __intel_display_resume(struct drm_i915_private *i915, 830 struct drm_atomic_state *state, 831 struct drm_modeset_acquire_ctx *ctx) 832 { 833 struct drm_crtc_state *crtc_state; 834 struct drm_crtc *crtc; 835 int i, ret; 836 837 intel_modeset_setup_hw_state(i915, ctx); 838 intel_vga_redisable(i915); 839 840 if (!state) 841 return 0; 842 843 /* 844 * We've duplicated the state, pointers to the old state are invalid. 845 * 846 * Don't attempt to use the old state until we commit the duplicated state. 847 */ 848 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 849 /* 850 * Force recalculation even if we restore 851 * current state. With fast modeset this may not result 852 * in a modeset when the state is compatible. 853 */ 854 crtc_state->mode_changed = true; 855 } 856 857 /* ignore any reset values/BIOS leftovers in the WM registers */ 858 if (!HAS_GMCH(i915)) 859 to_intel_atomic_state(state)->skip_intermediate_wm = true; 860 861 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 862 863 drm_WARN_ON(&i915->drm, ret == -EDEADLK); 864 865 return ret; 866 } 867 868 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 869 { 870 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && 871 intel_has_gpu_reset(to_gt(dev_priv))); 872 } 873 874 void intel_display_prepare_reset(struct drm_i915_private *dev_priv) 875 { 876 struct drm_device *dev = &dev_priv->drm; 877 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 878 struct drm_atomic_state *state; 879 int ret; 880 881 if (!HAS_DISPLAY(dev_priv)) 882 return; 883 884 /* reset doesn't touch the display */ 885 if (!dev_priv->params.force_reset_modeset_test && 886 !gpu_reset_clobbers_display(dev_priv)) 887 return; 888 889 /* We have a modeset vs reset deadlock, defensively unbreak it. */ 890 set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags); 891 smp_mb__after_atomic(); 892 wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET); 893 894 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { 895 drm_dbg_kms(&dev_priv->drm, 896 "Modeset potentially stuck, unbreaking through wedging\n"); 897 intel_gt_set_wedged(to_gt(dev_priv)); 898 } 899 900 /* 901 * Need mode_config.mutex so that we don't 902 * trample ongoing ->detect() and whatnot. 903 */ 904 mutex_lock(&dev->mode_config.mutex); 905 drm_modeset_acquire_init(ctx, 0); 906 while (1) { 907 ret = drm_modeset_lock_all_ctx(dev, ctx); 908 if (ret != -EDEADLK) 909 break; 910 911 drm_modeset_backoff(ctx); 912 } 913 /* 914 * Disabling the crtcs gracefully seems nicer. Also the 915 * g33 docs say we should at least disable all the planes. 916 */ 917 state = drm_atomic_helper_duplicate_state(dev, ctx); 918 if (IS_ERR(state)) { 919 ret = PTR_ERR(state); 920 drm_err(&dev_priv->drm, "Duplicating state failed with %i\n", 921 ret); 922 return; 923 } 924 925 ret = drm_atomic_helper_disable_all(dev, ctx); 926 if (ret) { 927 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n", 928 ret); 929 drm_atomic_state_put(state); 930 return; 931 } 932 933 dev_priv->modeset_restore_state = state; 934 state->acquire_ctx = ctx; 935 } 936 937 void intel_display_finish_reset(struct drm_i915_private *i915) 938 { 939 struct drm_modeset_acquire_ctx *ctx = &i915->reset_ctx; 940 struct drm_atomic_state *state; 941 int ret; 942 943 if (!HAS_DISPLAY(i915)) 944 return; 945 946 /* reset doesn't touch the display */ 947 if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags)) 948 return; 949 950 state = fetch_and_zero(&i915->modeset_restore_state); 951 if (!state) 952 goto unlock; 953 954 /* reset doesn't touch the display */ 955 if (!gpu_reset_clobbers_display(i915)) { 956 /* for testing only restore the display */ 957 ret = __intel_display_resume(i915, state, ctx); 958 if (ret) 959 drm_err(&i915->drm, 960 "Restoring old state failed with %i\n", ret); 961 } else { 962 /* 963 * The display has been reset as well, 964 * so need a full re-initialization. 965 */ 966 intel_pps_unlock_regs_wa(i915); 967 intel_modeset_init_hw(i915); 968 intel_init_clock_gating(i915); 969 intel_hpd_init(i915); 970 971 ret = __intel_display_resume(i915, state, ctx); 972 if (ret) 973 drm_err(&i915->drm, 974 "Restoring old state failed with %i\n", ret); 975 976 intel_hpd_poll_disable(i915); 977 } 978 979 drm_atomic_state_put(state); 980 unlock: 981 drm_modeset_drop_locks(ctx); 982 drm_modeset_acquire_fini(ctx); 983 mutex_unlock(&i915->drm.mode_config.mutex); 984 985 clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags); 986 } 987 988 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) 989 { 990 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 991 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 992 enum pipe pipe = crtc->pipe; 993 u32 tmp; 994 995 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe)); 996 997 /* 998 * Display WA #1153: icl 999 * enable hardware to bypass the alpha math 1000 * and rounding for per-pixel values 00 and 0xff 1001 */ 1002 tmp |= PER_PIXEL_ALPHA_BYPASS_EN; 1003 /* 1004 * Display WA # 1605353570: icl 1005 * Set the pixel rounding bit to 1 for allowing 1006 * passthrough of Frame buffer pixels unmodified 1007 * across pipe 1008 */ 1009 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; 1010 1011 /* 1012 * Underrun recovery must always be disabled on display 13+. 1013 * DG2 chicken bit meaning is inverted compared to other platforms. 1014 */ 1015 if (IS_DG2(dev_priv)) 1016 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2; 1017 else if (DISPLAY_VER(dev_priv) >= 13) 1018 tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; 1019 1020 /* Wa_14010547955:dg2 */ 1021 if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER)) 1022 tmp |= DG2_RENDER_CCSTAG_4_3_EN; 1023 1024 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp); 1025 } 1026 1027 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 1028 { 1029 struct drm_crtc *crtc; 1030 bool cleanup_done; 1031 1032 drm_for_each_crtc(crtc, &dev_priv->drm) { 1033 struct drm_crtc_commit *commit; 1034 spin_lock(&crtc->commit_lock); 1035 commit = list_first_entry_or_null(&crtc->commit_list, 1036 struct drm_crtc_commit, commit_entry); 1037 cleanup_done = commit ? 1038 try_wait_for_completion(&commit->cleanup_done) : true; 1039 spin_unlock(&crtc->commit_lock); 1040 1041 if (cleanup_done) 1042 continue; 1043 1044 intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc)); 1045 1046 return true; 1047 } 1048 1049 return false; 1050 } 1051 1052 /* 1053 * Finds the encoder associated with the given CRTC. This can only be 1054 * used when we know that the CRTC isn't feeding multiple encoders! 1055 */ 1056 struct intel_encoder * 1057 intel_get_crtc_new_encoder(const struct intel_atomic_state *state, 1058 const struct intel_crtc_state *crtc_state) 1059 { 1060 const struct drm_connector_state *connector_state; 1061 const struct drm_connector *connector; 1062 struct intel_encoder *encoder = NULL; 1063 struct intel_crtc *master_crtc; 1064 int num_encoders = 0; 1065 int i; 1066 1067 master_crtc = intel_master_crtc(crtc_state); 1068 1069 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 1070 if (connector_state->crtc != &master_crtc->base) 1071 continue; 1072 1073 encoder = to_intel_encoder(connector_state->best_encoder); 1074 num_encoders++; 1075 } 1076 1077 drm_WARN(encoder->base.dev, num_encoders != 1, 1078 "%d encoders for pipe %c\n", 1079 num_encoders, pipe_name(master_crtc->pipe)); 1080 1081 return encoder; 1082 } 1083 1084 static void cpt_verify_modeset(struct drm_i915_private *dev_priv, 1085 enum pipe pipe) 1086 { 1087 i915_reg_t dslreg = PIPEDSL(pipe); 1088 u32 temp; 1089 1090 temp = intel_de_read(dev_priv, dslreg); 1091 udelay(500); 1092 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) { 1093 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) 1094 drm_err(&dev_priv->drm, 1095 "mode set failed: pipe %c stuck\n", 1096 pipe_name(pipe)); 1097 } 1098 } 1099 1100 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) 1101 { 1102 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1103 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1104 const struct drm_rect *dst = &crtc_state->pch_pfit.dst; 1105 enum pipe pipe = crtc->pipe; 1106 int width = drm_rect_width(dst); 1107 int height = drm_rect_height(dst); 1108 int x = dst->x1; 1109 int y = dst->y1; 1110 1111 if (!crtc_state->pch_pfit.enabled) 1112 return; 1113 1114 /* Force use of hard-coded filter coefficients 1115 * as some pre-programmed values are broken, 1116 * e.g. x201. 1117 */ 1118 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 1119 intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE | 1120 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe)); 1121 else 1122 intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE | 1123 PF_FILTER_MED_3x3); 1124 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y); 1125 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height); 1126 } 1127 1128 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc) 1129 { 1130 if (crtc->overlay) 1131 (void) intel_overlay_switch_off(crtc->overlay); 1132 1133 /* Let userspace switch the overlay on again. In most cases userspace 1134 * has to recompute where to put it anyway. 1135 */ 1136 } 1137 1138 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state) 1139 { 1140 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1141 1142 if (!crtc_state->nv12_planes) 1143 return false; 1144 1145 /* WA Display #0827: Gen9:all */ 1146 if (DISPLAY_VER(dev_priv) == 9) 1147 return true; 1148 1149 return false; 1150 } 1151 1152 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) 1153 { 1154 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1155 1156 /* Wa_2006604312:icl,ehl */ 1157 if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11) 1158 return true; 1159 1160 return false; 1161 } 1162 1163 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state) 1164 { 1165 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1166 1167 /* Wa_1604331009:icl,jsl,ehl */ 1168 if (is_hdr_mode(crtc_state) && 1169 crtc_state->active_planes & BIT(PLANE_CURSOR) && 1170 DISPLAY_VER(dev_priv) == 11) 1171 return true; 1172 1173 return false; 1174 } 1175 1176 static void intel_async_flip_vtd_wa(struct drm_i915_private *i915, 1177 enum pipe pipe, bool enable) 1178 { 1179 if (DISPLAY_VER(i915) == 9) { 1180 /* 1181 * "Plane N strech max must be programmed to 11b (x1) 1182 * when Async flips are enabled on that plane." 1183 */ 1184 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe), 1185 SKL_PLANE1_STRETCH_MAX_MASK, 1186 enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8); 1187 } else { 1188 /* Also needed on HSW/BDW albeit undocumented */ 1189 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe), 1190 HSW_PRI_STRETCH_MAX_MASK, 1191 enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8); 1192 } 1193 } 1194 1195 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state) 1196 { 1197 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 1198 1199 return crtc_state->uapi.async_flip && i915_vtd_active(i915) && 1200 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915)); 1201 } 1202 1203 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, 1204 const struct intel_crtc_state *new_crtc_state) 1205 { 1206 return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) && 1207 new_crtc_state->active_planes; 1208 } 1209 1210 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, 1211 const struct intel_crtc_state *new_crtc_state) 1212 { 1213 return old_crtc_state->active_planes && 1214 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)); 1215 } 1216 1217 static void intel_post_plane_update(struct intel_atomic_state *state, 1218 struct intel_crtc *crtc) 1219 { 1220 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 1221 const struct intel_crtc_state *old_crtc_state = 1222 intel_atomic_get_old_crtc_state(state, crtc); 1223 const struct intel_crtc_state *new_crtc_state = 1224 intel_atomic_get_new_crtc_state(state, crtc); 1225 enum pipe pipe = crtc->pipe; 1226 1227 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits); 1228 1229 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active) 1230 intel_update_watermarks(dev_priv); 1231 1232 hsw_ips_post_update(state, crtc); 1233 intel_fbc_post_update(state, crtc); 1234 1235 if (needs_async_flip_vtd_wa(old_crtc_state) && 1236 !needs_async_flip_vtd_wa(new_crtc_state)) 1237 intel_async_flip_vtd_wa(dev_priv, pipe, false); 1238 1239 if (needs_nv12_wa(old_crtc_state) && 1240 !needs_nv12_wa(new_crtc_state)) 1241 skl_wa_827(dev_priv, pipe, false); 1242 1243 if (needs_scalerclk_wa(old_crtc_state) && 1244 !needs_scalerclk_wa(new_crtc_state)) 1245 icl_wa_scalerclkgating(dev_priv, pipe, false); 1246 1247 if (needs_cursorclk_wa(old_crtc_state) && 1248 !needs_cursorclk_wa(new_crtc_state)) 1249 icl_wa_cursorclkgating(dev_priv, pipe, false); 1250 1251 intel_drrs_activate(new_crtc_state); 1252 } 1253 1254 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, 1255 struct intel_crtc *crtc) 1256 { 1257 const struct intel_crtc_state *crtc_state = 1258 intel_atomic_get_new_crtc_state(state, crtc); 1259 u8 update_planes = crtc_state->update_planes; 1260 const struct intel_plane_state *plane_state; 1261 struct intel_plane *plane; 1262 int i; 1263 1264 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 1265 if (plane->pipe == crtc->pipe && 1266 update_planes & BIT(plane->id)) 1267 plane->enable_flip_done(plane); 1268 } 1269 } 1270 1271 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state, 1272 struct intel_crtc *crtc) 1273 { 1274 const struct intel_crtc_state *crtc_state = 1275 intel_atomic_get_new_crtc_state(state, crtc); 1276 u8 update_planes = crtc_state->update_planes; 1277 const struct intel_plane_state *plane_state; 1278 struct intel_plane *plane; 1279 int i; 1280 1281 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 1282 if (plane->pipe == crtc->pipe && 1283 update_planes & BIT(plane->id)) 1284 plane->disable_flip_done(plane); 1285 } 1286 } 1287 1288 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, 1289 struct intel_crtc *crtc) 1290 { 1291 const struct intel_crtc_state *old_crtc_state = 1292 intel_atomic_get_old_crtc_state(state, crtc); 1293 const struct intel_crtc_state *new_crtc_state = 1294 intel_atomic_get_new_crtc_state(state, crtc); 1295 u8 update_planes = new_crtc_state->update_planes; 1296 const struct intel_plane_state *old_plane_state; 1297 struct intel_plane *plane; 1298 bool need_vbl_wait = false; 1299 int i; 1300 1301 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 1302 if (plane->need_async_flip_disable_wa && 1303 plane->pipe == crtc->pipe && 1304 update_planes & BIT(plane->id)) { 1305 /* 1306 * Apart from the async flip bit we want to 1307 * preserve the old state for the plane. 1308 */ 1309 plane->async_flip(plane, old_crtc_state, 1310 old_plane_state, false); 1311 need_vbl_wait = true; 1312 } 1313 } 1314 1315 if (need_vbl_wait) 1316 intel_crtc_wait_for_next_vblank(crtc); 1317 } 1318 1319 static void intel_pre_plane_update(struct intel_atomic_state *state, 1320 struct intel_crtc *crtc) 1321 { 1322 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 1323 const struct intel_crtc_state *old_crtc_state = 1324 intel_atomic_get_old_crtc_state(state, crtc); 1325 const struct intel_crtc_state *new_crtc_state = 1326 intel_atomic_get_new_crtc_state(state, crtc); 1327 enum pipe pipe = crtc->pipe; 1328 1329 intel_drrs_deactivate(old_crtc_state); 1330 1331 intel_psr_pre_plane_update(state, crtc); 1332 1333 if (hsw_ips_pre_update(state, crtc)) 1334 intel_crtc_wait_for_next_vblank(crtc); 1335 1336 if (intel_fbc_pre_update(state, crtc)) 1337 intel_crtc_wait_for_next_vblank(crtc); 1338 1339 if (!needs_async_flip_vtd_wa(old_crtc_state) && 1340 needs_async_flip_vtd_wa(new_crtc_state)) 1341 intel_async_flip_vtd_wa(dev_priv, pipe, true); 1342 1343 /* Display WA 827 */ 1344 if (!needs_nv12_wa(old_crtc_state) && 1345 needs_nv12_wa(new_crtc_state)) 1346 skl_wa_827(dev_priv, pipe, true); 1347 1348 /* Wa_2006604312:icl,ehl */ 1349 if (!needs_scalerclk_wa(old_crtc_state) && 1350 needs_scalerclk_wa(new_crtc_state)) 1351 icl_wa_scalerclkgating(dev_priv, pipe, true); 1352 1353 /* Wa_1604331009:icl,jsl,ehl */ 1354 if (!needs_cursorclk_wa(old_crtc_state) && 1355 needs_cursorclk_wa(new_crtc_state)) 1356 icl_wa_cursorclkgating(dev_priv, pipe, true); 1357 1358 /* 1359 * Vblank time updates from the shadow to live plane control register 1360 * are blocked if the memory self-refresh mode is active at that 1361 * moment. So to make sure the plane gets truly disabled, disable 1362 * first the self-refresh mode. The self-refresh enable bit in turn 1363 * will be checked/applied by the HW only at the next frame start 1364 * event which is after the vblank start event, so we need to have a 1365 * wait-for-vblank between disabling the plane and the pipe. 1366 */ 1367 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active && 1368 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 1369 intel_crtc_wait_for_next_vblank(crtc); 1370 1371 /* 1372 * IVB workaround: must disable low power watermarks for at least 1373 * one frame before enabling scaling. LP watermarks can be re-enabled 1374 * when scaling is disabled. 1375 * 1376 * WaCxSRDisabledForSpriteScaling:ivb 1377 */ 1378 if (old_crtc_state->hw.active && 1379 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) 1380 intel_crtc_wait_for_next_vblank(crtc); 1381 1382 /* 1383 * If we're doing a modeset we don't need to do any 1384 * pre-vblank watermark programming here. 1385 */ 1386 if (!intel_crtc_needs_modeset(new_crtc_state)) { 1387 /* 1388 * For platforms that support atomic watermarks, program the 1389 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 1390 * will be the intermediate values that are safe for both pre- and 1391 * post- vblank; when vblank happens, the 'active' values will be set 1392 * to the final 'target' values and we'll do this again to get the 1393 * optimal watermarks. For gen9+ platforms, the values we program here 1394 * will be the final target values which will get automatically latched 1395 * at vblank time; no further programming will be necessary. 1396 * 1397 * If a platform hasn't been transitioned to atomic watermarks yet, 1398 * we'll continue to update watermarks the old way, if flags tell 1399 * us to. 1400 */ 1401 if (!intel_initial_watermarks(state, crtc)) 1402 if (new_crtc_state->update_wm_pre) 1403 intel_update_watermarks(dev_priv); 1404 } 1405 1406 /* 1407 * Gen2 reports pipe underruns whenever all planes are disabled. 1408 * So disable underrun reporting before all the planes get disabled. 1409 * 1410 * We do this after .initial_watermarks() so that we have a 1411 * chance of catching underruns with the intermediate watermarks 1412 * vs. the old plane configuration. 1413 */ 1414 if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state)) 1415 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 1416 1417 /* 1418 * WA for platforms where async address update enable bit 1419 * is double buffered and only latched at start of vblank. 1420 */ 1421 if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip) 1422 intel_crtc_async_flip_disable_wa(state, crtc); 1423 } 1424 1425 static void intel_crtc_disable_planes(struct intel_atomic_state *state, 1426 struct intel_crtc *crtc) 1427 { 1428 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1429 const struct intel_crtc_state *new_crtc_state = 1430 intel_atomic_get_new_crtc_state(state, crtc); 1431 unsigned int update_mask = new_crtc_state->update_planes; 1432 const struct intel_plane_state *old_plane_state; 1433 struct intel_plane *plane; 1434 unsigned fb_bits = 0; 1435 int i; 1436 1437 intel_crtc_dpms_overlay_disable(crtc); 1438 1439 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 1440 if (crtc->pipe != plane->pipe || 1441 !(update_mask & BIT(plane->id))) 1442 continue; 1443 1444 intel_plane_disable_arm(plane, new_crtc_state); 1445 1446 if (old_plane_state->uapi.visible) 1447 fb_bits |= plane->frontbuffer_bit; 1448 } 1449 1450 intel_frontbuffer_flip(dev_priv, fb_bits); 1451 } 1452 1453 /* 1454 * intel_connector_primary_encoder - get the primary encoder for a connector 1455 * @connector: connector for which to return the encoder 1456 * 1457 * Returns the primary encoder for a connector. There is a 1:1 mapping from 1458 * all connectors to their encoder, except for DP-MST connectors which have 1459 * both a virtual and a primary encoder. These DP-MST primary encoders can be 1460 * pointed to by as many DP-MST connectors as there are pipes. 1461 */ 1462 static struct intel_encoder * 1463 intel_connector_primary_encoder(struct intel_connector *connector) 1464 { 1465 struct intel_encoder *encoder; 1466 1467 if (connector->mst_port) 1468 return &dp_to_dig_port(connector->mst_port)->base; 1469 1470 encoder = intel_attached_encoder(connector); 1471 drm_WARN_ON(connector->base.dev, !encoder); 1472 1473 return encoder; 1474 } 1475 1476 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 1477 { 1478 struct drm_i915_private *i915 = to_i915(state->base.dev); 1479 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 1480 struct intel_crtc *crtc; 1481 struct drm_connector_state *new_conn_state; 1482 struct drm_connector *connector; 1483 int i; 1484 1485 /* 1486 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits. 1487 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook. 1488 */ 1489 if (i915->dpll.mgr) { 1490 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 1491 if (intel_crtc_needs_modeset(new_crtc_state)) 1492 continue; 1493 1494 new_crtc_state->shared_dpll = old_crtc_state->shared_dpll; 1495 new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state; 1496 } 1497 } 1498 1499 if (!state->modeset) 1500 return; 1501 1502 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 1503 i) { 1504 struct intel_connector *intel_connector; 1505 struct intel_encoder *encoder; 1506 struct intel_crtc *crtc; 1507 1508 if (!intel_connector_needs_modeset(state, connector)) 1509 continue; 1510 1511 intel_connector = to_intel_connector(connector); 1512 encoder = intel_connector_primary_encoder(intel_connector); 1513 if (!encoder->update_prepare) 1514 continue; 1515 1516 crtc = new_conn_state->crtc ? 1517 to_intel_crtc(new_conn_state->crtc) : NULL; 1518 encoder->update_prepare(state, encoder, crtc); 1519 } 1520 } 1521 1522 static void intel_encoders_update_complete(struct intel_atomic_state *state) 1523 { 1524 struct drm_connector_state *new_conn_state; 1525 struct drm_connector *connector; 1526 int i; 1527 1528 if (!state->modeset) 1529 return; 1530 1531 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 1532 i) { 1533 struct intel_connector *intel_connector; 1534 struct intel_encoder *encoder; 1535 struct intel_crtc *crtc; 1536 1537 if (!intel_connector_needs_modeset(state, connector)) 1538 continue; 1539 1540 intel_connector = to_intel_connector(connector); 1541 encoder = intel_connector_primary_encoder(intel_connector); 1542 if (!encoder->update_complete) 1543 continue; 1544 1545 crtc = new_conn_state->crtc ? 1546 to_intel_crtc(new_conn_state->crtc) : NULL; 1547 encoder->update_complete(state, encoder, crtc); 1548 } 1549 } 1550 1551 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, 1552 struct intel_crtc *crtc) 1553 { 1554 const struct intel_crtc_state *crtc_state = 1555 intel_atomic_get_new_crtc_state(state, crtc); 1556 const struct drm_connector_state *conn_state; 1557 struct drm_connector *conn; 1558 int i; 1559 1560 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1561 struct intel_encoder *encoder = 1562 to_intel_encoder(conn_state->best_encoder); 1563 1564 if (conn_state->crtc != &crtc->base) 1565 continue; 1566 1567 if (encoder->pre_pll_enable) 1568 encoder->pre_pll_enable(state, encoder, 1569 crtc_state, conn_state); 1570 } 1571 } 1572 1573 static void intel_encoders_pre_enable(struct intel_atomic_state *state, 1574 struct intel_crtc *crtc) 1575 { 1576 const struct intel_crtc_state *crtc_state = 1577 intel_atomic_get_new_crtc_state(state, crtc); 1578 const struct drm_connector_state *conn_state; 1579 struct drm_connector *conn; 1580 int i; 1581 1582 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1583 struct intel_encoder *encoder = 1584 to_intel_encoder(conn_state->best_encoder); 1585 1586 if (conn_state->crtc != &crtc->base) 1587 continue; 1588 1589 if (encoder->pre_enable) 1590 encoder->pre_enable(state, encoder, 1591 crtc_state, conn_state); 1592 } 1593 } 1594 1595 static void intel_encoders_enable(struct intel_atomic_state *state, 1596 struct intel_crtc *crtc) 1597 { 1598 const struct intel_crtc_state *crtc_state = 1599 intel_atomic_get_new_crtc_state(state, crtc); 1600 const struct drm_connector_state *conn_state; 1601 struct drm_connector *conn; 1602 int i; 1603 1604 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1605 struct intel_encoder *encoder = 1606 to_intel_encoder(conn_state->best_encoder); 1607 1608 if (conn_state->crtc != &crtc->base) 1609 continue; 1610 1611 if (encoder->enable) 1612 encoder->enable(state, encoder, 1613 crtc_state, conn_state); 1614 intel_opregion_notify_encoder(encoder, true); 1615 } 1616 } 1617 1618 static void intel_encoders_disable(struct intel_atomic_state *state, 1619 struct intel_crtc *crtc) 1620 { 1621 const struct intel_crtc_state *old_crtc_state = 1622 intel_atomic_get_old_crtc_state(state, crtc); 1623 const struct drm_connector_state *old_conn_state; 1624 struct drm_connector *conn; 1625 int i; 1626 1627 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 1628 struct intel_encoder *encoder = 1629 to_intel_encoder(old_conn_state->best_encoder); 1630 1631 if (old_conn_state->crtc != &crtc->base) 1632 continue; 1633 1634 intel_opregion_notify_encoder(encoder, false); 1635 if (encoder->disable) 1636 encoder->disable(state, encoder, 1637 old_crtc_state, old_conn_state); 1638 } 1639 } 1640 1641 static void intel_encoders_post_disable(struct intel_atomic_state *state, 1642 struct intel_crtc *crtc) 1643 { 1644 const struct intel_crtc_state *old_crtc_state = 1645 intel_atomic_get_old_crtc_state(state, crtc); 1646 const struct drm_connector_state *old_conn_state; 1647 struct drm_connector *conn; 1648 int i; 1649 1650 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 1651 struct intel_encoder *encoder = 1652 to_intel_encoder(old_conn_state->best_encoder); 1653 1654 if (old_conn_state->crtc != &crtc->base) 1655 continue; 1656 1657 if (encoder->post_disable) 1658 encoder->post_disable(state, encoder, 1659 old_crtc_state, old_conn_state); 1660 } 1661 } 1662 1663 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state, 1664 struct intel_crtc *crtc) 1665 { 1666 const struct intel_crtc_state *old_crtc_state = 1667 intel_atomic_get_old_crtc_state(state, crtc); 1668 const struct drm_connector_state *old_conn_state; 1669 struct drm_connector *conn; 1670 int i; 1671 1672 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 1673 struct intel_encoder *encoder = 1674 to_intel_encoder(old_conn_state->best_encoder); 1675 1676 if (old_conn_state->crtc != &crtc->base) 1677 continue; 1678 1679 if (encoder->post_pll_disable) 1680 encoder->post_pll_disable(state, encoder, 1681 old_crtc_state, old_conn_state); 1682 } 1683 } 1684 1685 static void intel_encoders_update_pipe(struct intel_atomic_state *state, 1686 struct intel_crtc *crtc) 1687 { 1688 const struct intel_crtc_state *crtc_state = 1689 intel_atomic_get_new_crtc_state(state, crtc); 1690 const struct drm_connector_state *conn_state; 1691 struct drm_connector *conn; 1692 int i; 1693 1694 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1695 struct intel_encoder *encoder = 1696 to_intel_encoder(conn_state->best_encoder); 1697 1698 if (conn_state->crtc != &crtc->base) 1699 continue; 1700 1701 if (encoder->update_pipe) 1702 encoder->update_pipe(state, encoder, 1703 crtc_state, conn_state); 1704 } 1705 } 1706 1707 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) 1708 { 1709 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1710 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 1711 1712 plane->disable_arm(plane, crtc_state); 1713 } 1714 1715 static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 1716 { 1717 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1718 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1719 1720 if (crtc_state->has_pch_encoder) { 1721 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1722 &crtc_state->fdi_m_n); 1723 } else if (intel_crtc_has_dp_encoder(crtc_state)) { 1724 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1725 &crtc_state->dp_m_n); 1726 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 1727 &crtc_state->dp_m2_n2); 1728 } 1729 1730 intel_set_transcoder_timings(crtc_state); 1731 1732 ilk_set_pipeconf(crtc_state); 1733 } 1734 1735 static void ilk_crtc_enable(struct intel_atomic_state *state, 1736 struct intel_crtc *crtc) 1737 { 1738 const struct intel_crtc_state *new_crtc_state = 1739 intel_atomic_get_new_crtc_state(state, crtc); 1740 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1741 enum pipe pipe = crtc->pipe; 1742 1743 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 1744 return; 1745 1746 /* 1747 * Sometimes spurious CPU pipe underruns happen during FDI 1748 * training, at least with VGA+HDMI cloning. Suppress them. 1749 * 1750 * On ILK we get an occasional spurious CPU pipe underruns 1751 * between eDP port A enable and vdd enable. Also PCH port 1752 * enable seems to result in the occasional CPU pipe underrun. 1753 * 1754 * Spurious PCH underruns also occur during PCH enabling. 1755 */ 1756 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 1757 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 1758 1759 ilk_configure_cpu_transcoder(new_crtc_state); 1760 1761 intel_set_pipe_src_size(new_crtc_state); 1762 1763 crtc->active = true; 1764 1765 intel_encoders_pre_enable(state, crtc); 1766 1767 if (new_crtc_state->has_pch_encoder) { 1768 ilk_pch_pre_enable(state, crtc); 1769 } else { 1770 assert_fdi_tx_disabled(dev_priv, pipe); 1771 assert_fdi_rx_disabled(dev_priv, pipe); 1772 } 1773 1774 ilk_pfit_enable(new_crtc_state); 1775 1776 /* 1777 * On ILK+ LUT must be loaded before the pipe is running but with 1778 * clocks enabled 1779 */ 1780 intel_color_load_luts(new_crtc_state); 1781 intel_color_commit_noarm(new_crtc_state); 1782 intel_color_commit_arm(new_crtc_state); 1783 /* update DSPCNTR to configure gamma for pipe bottom color */ 1784 intel_disable_primary_plane(new_crtc_state); 1785 1786 intel_initial_watermarks(state, crtc); 1787 intel_enable_transcoder(new_crtc_state); 1788 1789 if (new_crtc_state->has_pch_encoder) 1790 ilk_pch_enable(state, crtc); 1791 1792 intel_crtc_vblank_on(new_crtc_state); 1793 1794 intel_encoders_enable(state, crtc); 1795 1796 if (HAS_PCH_CPT(dev_priv)) 1797 cpt_verify_modeset(dev_priv, pipe); 1798 1799 /* 1800 * Must wait for vblank to avoid spurious PCH FIFO underruns. 1801 * And a second vblank wait is needed at least on ILK with 1802 * some interlaced HDMI modes. Let's do the double wait always 1803 * in case there are more corner cases we don't know about. 1804 */ 1805 if (new_crtc_state->has_pch_encoder) { 1806 intel_crtc_wait_for_next_vblank(crtc); 1807 intel_crtc_wait_for_next_vblank(crtc); 1808 } 1809 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 1810 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 1811 } 1812 1813 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, 1814 enum pipe pipe, bool apply) 1815 { 1816 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)); 1817 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 1818 1819 if (apply) 1820 val |= mask; 1821 else 1822 val &= ~mask; 1823 1824 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val); 1825 } 1826 1827 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) 1828 { 1829 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1830 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1831 1832 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe), 1833 HSW_LINETIME(crtc_state->linetime) | 1834 HSW_IPS_LINETIME(crtc_state->ips_linetime)); 1835 } 1836 1837 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) 1838 { 1839 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1840 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1841 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder); 1842 u32 val; 1843 1844 val = intel_de_read(dev_priv, reg); 1845 val &= ~HSW_FRAME_START_DELAY_MASK; 1846 val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 1847 intel_de_write(dev_priv, reg, val); 1848 } 1849 1850 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, 1851 const struct intel_crtc_state *crtc_state) 1852 { 1853 struct intel_crtc *master_crtc = intel_master_crtc(crtc_state); 1854 1855 /* 1856 * Enable sequence steps 1-7 on bigjoiner master 1857 */ 1858 if (intel_crtc_is_bigjoiner_slave(crtc_state)) 1859 intel_encoders_pre_pll_enable(state, master_crtc); 1860 1861 if (crtc_state->shared_dpll) 1862 intel_enable_shared_dpll(crtc_state); 1863 1864 if (intel_crtc_is_bigjoiner_slave(crtc_state)) 1865 intel_encoders_pre_enable(state, master_crtc); 1866 } 1867 1868 static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 1869 { 1870 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1871 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1872 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1873 1874 if (crtc_state->has_pch_encoder) { 1875 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1876 &crtc_state->fdi_m_n); 1877 } else if (intel_crtc_has_dp_encoder(crtc_state)) { 1878 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 1879 &crtc_state->dp_m_n); 1880 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 1881 &crtc_state->dp_m2_n2); 1882 } 1883 1884 intel_set_transcoder_timings(crtc_state); 1885 1886 if (cpu_transcoder != TRANSCODER_EDP) 1887 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), 1888 crtc_state->pixel_multiplier - 1); 1889 1890 hsw_set_frame_start_delay(crtc_state); 1891 1892 hsw_set_transconf(crtc_state); 1893 } 1894 1895 static void hsw_crtc_enable(struct intel_atomic_state *state, 1896 struct intel_crtc *crtc) 1897 { 1898 const struct intel_crtc_state *new_crtc_state = 1899 intel_atomic_get_new_crtc_state(state, crtc); 1900 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1901 enum pipe pipe = crtc->pipe, hsw_workaround_pipe; 1902 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 1903 bool psl_clkgate_wa; 1904 1905 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 1906 return; 1907 1908 if (!new_crtc_state->bigjoiner_pipes) { 1909 intel_encoders_pre_pll_enable(state, crtc); 1910 1911 if (new_crtc_state->shared_dpll) 1912 intel_enable_shared_dpll(new_crtc_state); 1913 1914 intel_encoders_pre_enable(state, crtc); 1915 } else { 1916 icl_ddi_bigjoiner_pre_enable(state, new_crtc_state); 1917 } 1918 1919 intel_dsc_enable(new_crtc_state); 1920 1921 if (DISPLAY_VER(dev_priv) >= 13) 1922 intel_uncompressed_joiner_enable(new_crtc_state); 1923 1924 intel_set_pipe_src_size(new_crtc_state); 1925 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 1926 bdw_set_pipemisc(new_crtc_state); 1927 1928 if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) && 1929 !transcoder_is_dsi(cpu_transcoder)) 1930 hsw_configure_cpu_transcoder(new_crtc_state); 1931 1932 crtc->active = true; 1933 1934 /* Display WA #1180: WaDisableScalarClockGating: glk */ 1935 psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 && 1936 new_crtc_state->pch_pfit.enabled; 1937 if (psl_clkgate_wa) 1938 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 1939 1940 if (DISPLAY_VER(dev_priv) >= 9) 1941 skl_pfit_enable(new_crtc_state); 1942 else 1943 ilk_pfit_enable(new_crtc_state); 1944 1945 /* 1946 * On ILK+ LUT must be loaded before the pipe is running but with 1947 * clocks enabled 1948 */ 1949 intel_color_load_luts(new_crtc_state); 1950 intel_color_commit_noarm(new_crtc_state); 1951 intel_color_commit_arm(new_crtc_state); 1952 /* update DSPCNTR to configure gamma/csc for pipe bottom color */ 1953 if (DISPLAY_VER(dev_priv) < 9) 1954 intel_disable_primary_plane(new_crtc_state); 1955 1956 hsw_set_linetime_wm(new_crtc_state); 1957 1958 if (DISPLAY_VER(dev_priv) >= 11) 1959 icl_set_pipe_chicken(new_crtc_state); 1960 1961 intel_initial_watermarks(state, crtc); 1962 1963 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) 1964 intel_crtc_vblank_on(new_crtc_state); 1965 1966 intel_encoders_enable(state, crtc); 1967 1968 if (psl_clkgate_wa) { 1969 intel_crtc_wait_for_next_vblank(crtc); 1970 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); 1971 } 1972 1973 /* If we change the relative order between pipe/planes enabling, we need 1974 * to change the workaround. */ 1975 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; 1976 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 1977 struct intel_crtc *wa_crtc; 1978 1979 wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe); 1980 1981 intel_crtc_wait_for_next_vblank(wa_crtc); 1982 intel_crtc_wait_for_next_vblank(wa_crtc); 1983 } 1984 } 1985 1986 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) 1987 { 1988 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 1989 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1990 enum pipe pipe = crtc->pipe; 1991 1992 /* To avoid upsetting the power well on haswell only disable the pfit if 1993 * it's in use. The hw state code will make sure we get this right. */ 1994 if (!old_crtc_state->pch_pfit.enabled) 1995 return; 1996 1997 intel_de_write_fw(dev_priv, PF_CTL(pipe), 0); 1998 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0); 1999 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0); 2000 } 2001 2002 static void ilk_crtc_disable(struct intel_atomic_state *state, 2003 struct intel_crtc *crtc) 2004 { 2005 const struct intel_crtc_state *old_crtc_state = 2006 intel_atomic_get_old_crtc_state(state, crtc); 2007 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2008 enum pipe pipe = crtc->pipe; 2009 2010 /* 2011 * Sometimes spurious CPU pipe underruns happen when the 2012 * pipe is already disabled, but FDI RX/TX is still enabled. 2013 * Happens at least with VGA+HDMI cloning. Suppress them. 2014 */ 2015 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 2016 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 2017 2018 intel_encoders_disable(state, crtc); 2019 2020 intel_crtc_vblank_off(old_crtc_state); 2021 2022 intel_disable_transcoder(old_crtc_state); 2023 2024 ilk_pfit_disable(old_crtc_state); 2025 2026 if (old_crtc_state->has_pch_encoder) 2027 ilk_pch_disable(state, crtc); 2028 2029 intel_encoders_post_disable(state, crtc); 2030 2031 if (old_crtc_state->has_pch_encoder) 2032 ilk_pch_post_disable(state, crtc); 2033 2034 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2035 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 2036 } 2037 2038 static void hsw_crtc_disable(struct intel_atomic_state *state, 2039 struct intel_crtc *crtc) 2040 { 2041 const struct intel_crtc_state *old_crtc_state = 2042 intel_atomic_get_old_crtc_state(state, crtc); 2043 2044 /* 2045 * FIXME collapse everything to one hook. 2046 * Need care with mst->ddi interactions. 2047 */ 2048 if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) { 2049 intel_encoders_disable(state, crtc); 2050 intel_encoders_post_disable(state, crtc); 2051 } 2052 } 2053 2054 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 2055 { 2056 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2057 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2058 2059 if (!crtc_state->gmch_pfit.control) 2060 return; 2061 2062 /* 2063 * The panel fitter should only be adjusted whilst the pipe is disabled, 2064 * according to register description and PRM. 2065 */ 2066 drm_WARN_ON(&dev_priv->drm, 2067 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE); 2068 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); 2069 2070 intel_de_write(dev_priv, PFIT_PGM_RATIOS, 2071 crtc_state->gmch_pfit.pgm_ratios); 2072 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control); 2073 2074 /* Border color in case we don't scale up to the full screen. Black by 2075 * default, change to something else for debugging. */ 2076 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0); 2077 } 2078 2079 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 2080 { 2081 if (phy == PHY_NONE) 2082 return false; 2083 else if (IS_DG2(dev_priv)) 2084 /* 2085 * DG2 outputs labelled as "combo PHY" in the bspec use 2086 * SNPS PHYs with completely different programming, 2087 * hence we always return false here. 2088 */ 2089 return false; 2090 else if (IS_ALDERLAKE_S(dev_priv)) 2091 return phy <= PHY_E; 2092 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) 2093 return phy <= PHY_D; 2094 else if (IS_JSL_EHL(dev_priv)) 2095 return phy <= PHY_C; 2096 else if (DISPLAY_VER(dev_priv) >= 11) 2097 return phy <= PHY_B; 2098 else 2099 return false; 2100 } 2101 2102 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 2103 { 2104 if (IS_DG2(dev_priv)) 2105 /* DG2's "TC1" output uses a SNPS PHY */ 2106 return false; 2107 else if (IS_ALDERLAKE_P(dev_priv)) 2108 return phy >= PHY_F && phy <= PHY_I; 2109 else if (IS_TIGERLAKE(dev_priv)) 2110 return phy >= PHY_D && phy <= PHY_I; 2111 else if (IS_ICELAKE(dev_priv)) 2112 return phy >= PHY_C && phy <= PHY_F; 2113 else 2114 return false; 2115 } 2116 2117 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy) 2118 { 2119 if (phy == PHY_NONE) 2120 return false; 2121 else if (IS_DG2(dev_priv)) 2122 /* 2123 * All four "combo" ports and the TC1 port (PHY E) use 2124 * Synopsis PHYs. 2125 */ 2126 return phy <= PHY_E; 2127 2128 return false; 2129 } 2130 2131 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 2132 { 2133 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD) 2134 return PHY_D + port - PORT_D_XELPD; 2135 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1) 2136 return PHY_F + port - PORT_TC1; 2137 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1) 2138 return PHY_B + port - PORT_TC1; 2139 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1) 2140 return PHY_C + port - PORT_TC1; 2141 else if (IS_JSL_EHL(i915) && port == PORT_D) 2142 return PHY_A; 2143 2144 return PHY_A + port - PORT_A; 2145 } 2146 2147 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 2148 { 2149 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 2150 return TC_PORT_NONE; 2151 2152 if (DISPLAY_VER(dev_priv) >= 12) 2153 return TC_PORT_1 + port - PORT_TC1; 2154 else 2155 return TC_PORT_1 + port - PORT_C; 2156 } 2157 2158 enum intel_display_power_domain 2159 intel_aux_power_domain(struct intel_digital_port *dig_port) 2160 { 2161 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 2162 2163 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 2164 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch); 2165 2166 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); 2167 } 2168 2169 static void get_crtc_power_domains(struct intel_crtc_state *crtc_state, 2170 struct intel_power_domain_mask *mask) 2171 { 2172 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2173 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2174 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2175 struct drm_encoder *encoder; 2176 enum pipe pipe = crtc->pipe; 2177 2178 bitmap_zero(mask->bits, POWER_DOMAIN_NUM); 2179 2180 if (!crtc_state->hw.active) 2181 return; 2182 2183 set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits); 2184 set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits); 2185 if (crtc_state->pch_pfit.enabled || 2186 crtc_state->pch_pfit.force_thru) 2187 set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits); 2188 2189 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 2190 crtc_state->uapi.encoder_mask) { 2191 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 2192 2193 set_bit(intel_encoder->power_domain, mask->bits); 2194 } 2195 2196 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 2197 set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits); 2198 2199 if (crtc_state->shared_dpll) 2200 set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits); 2201 2202 if (crtc_state->dsc.compression_enable) 2203 set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits); 2204 } 2205 2206 void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state, 2207 struct intel_power_domain_mask *old_domains) 2208 { 2209 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2210 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2211 enum intel_display_power_domain domain; 2212 struct intel_power_domain_mask domains, new_domains; 2213 2214 get_crtc_power_domains(crtc_state, &domains); 2215 2216 bitmap_andnot(new_domains.bits, 2217 domains.bits, 2218 crtc->enabled_power_domains.mask.bits, 2219 POWER_DOMAIN_NUM); 2220 bitmap_andnot(old_domains->bits, 2221 crtc->enabled_power_domains.mask.bits, 2222 domains.bits, 2223 POWER_DOMAIN_NUM); 2224 2225 for_each_power_domain(domain, &new_domains) 2226 intel_display_power_get_in_set(dev_priv, 2227 &crtc->enabled_power_domains, 2228 domain); 2229 } 2230 2231 void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc, 2232 struct intel_power_domain_mask *domains) 2233 { 2234 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), 2235 &crtc->enabled_power_domains, 2236 domains); 2237 } 2238 2239 static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 2240 { 2241 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2242 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2243 2244 if (intel_crtc_has_dp_encoder(crtc_state)) { 2245 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 2246 &crtc_state->dp_m_n); 2247 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 2248 &crtc_state->dp_m2_n2); 2249 } 2250 2251 intel_set_transcoder_timings(crtc_state); 2252 2253 i9xx_set_pipeconf(crtc_state); 2254 } 2255 2256 static void valleyview_crtc_enable(struct intel_atomic_state *state, 2257 struct intel_crtc *crtc) 2258 { 2259 const struct intel_crtc_state *new_crtc_state = 2260 intel_atomic_get_new_crtc_state(state, crtc); 2261 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2262 enum pipe pipe = crtc->pipe; 2263 2264 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 2265 return; 2266 2267 i9xx_configure_cpu_transcoder(new_crtc_state); 2268 2269 intel_set_pipe_src_size(new_crtc_state); 2270 2271 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 2272 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY); 2273 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0); 2274 } 2275 2276 crtc->active = true; 2277 2278 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2279 2280 intel_encoders_pre_pll_enable(state, crtc); 2281 2282 if (IS_CHERRYVIEW(dev_priv)) 2283 chv_enable_pll(new_crtc_state); 2284 else 2285 vlv_enable_pll(new_crtc_state); 2286 2287 intel_encoders_pre_enable(state, crtc); 2288 2289 i9xx_pfit_enable(new_crtc_state); 2290 2291 intel_color_load_luts(new_crtc_state); 2292 intel_color_commit_noarm(new_crtc_state); 2293 intel_color_commit_arm(new_crtc_state); 2294 /* update DSPCNTR to configure gamma for pipe bottom color */ 2295 intel_disable_primary_plane(new_crtc_state); 2296 2297 intel_initial_watermarks(state, crtc); 2298 intel_enable_transcoder(new_crtc_state); 2299 2300 intel_crtc_vblank_on(new_crtc_state); 2301 2302 intel_encoders_enable(state, crtc); 2303 } 2304 2305 static void i9xx_crtc_enable(struct intel_atomic_state *state, 2306 struct intel_crtc *crtc) 2307 { 2308 const struct intel_crtc_state *new_crtc_state = 2309 intel_atomic_get_new_crtc_state(state, crtc); 2310 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2311 enum pipe pipe = crtc->pipe; 2312 2313 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 2314 return; 2315 2316 i9xx_configure_cpu_transcoder(new_crtc_state); 2317 2318 intel_set_pipe_src_size(new_crtc_state); 2319 2320 crtc->active = true; 2321 2322 if (DISPLAY_VER(dev_priv) != 2) 2323 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2324 2325 intel_encoders_pre_enable(state, crtc); 2326 2327 i9xx_enable_pll(new_crtc_state); 2328 2329 i9xx_pfit_enable(new_crtc_state); 2330 2331 intel_color_load_luts(new_crtc_state); 2332 intel_color_commit_noarm(new_crtc_state); 2333 intel_color_commit_arm(new_crtc_state); 2334 /* update DSPCNTR to configure gamma for pipe bottom color */ 2335 intel_disable_primary_plane(new_crtc_state); 2336 2337 if (!intel_initial_watermarks(state, crtc)) 2338 intel_update_watermarks(dev_priv); 2339 intel_enable_transcoder(new_crtc_state); 2340 2341 intel_crtc_vblank_on(new_crtc_state); 2342 2343 intel_encoders_enable(state, crtc); 2344 2345 /* prevents spurious underruns */ 2346 if (DISPLAY_VER(dev_priv) == 2) 2347 intel_crtc_wait_for_next_vblank(crtc); 2348 } 2349 2350 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 2351 { 2352 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 2353 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2354 2355 if (!old_crtc_state->gmch_pfit.control) 2356 return; 2357 2358 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder); 2359 2360 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n", 2361 intel_de_read(dev_priv, PFIT_CONTROL)); 2362 intel_de_write(dev_priv, PFIT_CONTROL, 0); 2363 } 2364 2365 static void i9xx_crtc_disable(struct intel_atomic_state *state, 2366 struct intel_crtc *crtc) 2367 { 2368 struct intel_crtc_state *old_crtc_state = 2369 intel_atomic_get_old_crtc_state(state, crtc); 2370 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2371 enum pipe pipe = crtc->pipe; 2372 2373 /* 2374 * On gen2 planes are double buffered but the pipe isn't, so we must 2375 * wait for planes to fully turn off before disabling the pipe. 2376 */ 2377 if (DISPLAY_VER(dev_priv) == 2) 2378 intel_crtc_wait_for_next_vblank(crtc); 2379 2380 intel_encoders_disable(state, crtc); 2381 2382 intel_crtc_vblank_off(old_crtc_state); 2383 2384 intel_disable_transcoder(old_crtc_state); 2385 2386 i9xx_pfit_disable(old_crtc_state); 2387 2388 intel_encoders_post_disable(state, crtc); 2389 2390 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 2391 if (IS_CHERRYVIEW(dev_priv)) 2392 chv_disable_pll(dev_priv, pipe); 2393 else if (IS_VALLEYVIEW(dev_priv)) 2394 vlv_disable_pll(dev_priv, pipe); 2395 else 2396 i9xx_disable_pll(old_crtc_state); 2397 } 2398 2399 intel_encoders_post_pll_disable(state, crtc); 2400 2401 if (DISPLAY_VER(dev_priv) != 2) 2402 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 2403 2404 if (!dev_priv->wm_disp->initial_watermarks) 2405 intel_update_watermarks(dev_priv); 2406 2407 /* clock the pipe down to 640x480@60 to potentially save power */ 2408 if (IS_I830(dev_priv)) 2409 i830_enable_pipe(dev_priv, pipe); 2410 } 2411 2412 2413 /* 2414 * turn all crtc's off, but do not adjust state 2415 * This has to be paired with a call to intel_modeset_setup_hw_state. 2416 */ 2417 int intel_display_suspend(struct drm_device *dev) 2418 { 2419 struct drm_i915_private *dev_priv = to_i915(dev); 2420 struct drm_atomic_state *state; 2421 int ret; 2422 2423 if (!HAS_DISPLAY(dev_priv)) 2424 return 0; 2425 2426 state = drm_atomic_helper_suspend(dev); 2427 ret = PTR_ERR_OR_ZERO(state); 2428 if (ret) 2429 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n", 2430 ret); 2431 else 2432 dev_priv->modeset_restore_state = state; 2433 return ret; 2434 } 2435 2436 void intel_encoder_destroy(struct drm_encoder *encoder) 2437 { 2438 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 2439 2440 drm_encoder_cleanup(encoder); 2441 kfree(intel_encoder); 2442 } 2443 2444 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 2445 { 2446 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2447 2448 /* GDG double wide on either pipe, otherwise pipe A only */ 2449 return DISPLAY_VER(dev_priv) < 4 && 2450 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 2451 } 2452 2453 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) 2454 { 2455 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock; 2456 struct drm_rect src; 2457 2458 /* 2459 * We only use IF-ID interlacing. If we ever use 2460 * PF-ID we'll need to adjust the pixel_rate here. 2461 */ 2462 2463 if (!crtc_state->pch_pfit.enabled) 2464 return pixel_rate; 2465 2466 drm_rect_init(&src, 0, 0, 2467 drm_rect_width(&crtc_state->pipe_src) << 16, 2468 drm_rect_height(&crtc_state->pipe_src) << 16); 2469 2470 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst, 2471 pixel_rate); 2472 } 2473 2474 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode, 2475 const struct drm_display_mode *timings) 2476 { 2477 mode->hdisplay = timings->crtc_hdisplay; 2478 mode->htotal = timings->crtc_htotal; 2479 mode->hsync_start = timings->crtc_hsync_start; 2480 mode->hsync_end = timings->crtc_hsync_end; 2481 2482 mode->vdisplay = timings->crtc_vdisplay; 2483 mode->vtotal = timings->crtc_vtotal; 2484 mode->vsync_start = timings->crtc_vsync_start; 2485 mode->vsync_end = timings->crtc_vsync_end; 2486 2487 mode->flags = timings->flags; 2488 mode->type = DRM_MODE_TYPE_DRIVER; 2489 2490 mode->clock = timings->crtc_clock; 2491 2492 drm_mode_set_name(mode); 2493 } 2494 2495 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 2496 { 2497 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2498 2499 if (HAS_GMCH(dev_priv)) 2500 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 2501 crtc_state->pixel_rate = 2502 crtc_state->hw.pipe_mode.crtc_clock; 2503 else 2504 crtc_state->pixel_rate = 2505 ilk_pipe_pixel_rate(crtc_state); 2506 } 2507 2508 static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state, 2509 struct drm_display_mode *mode) 2510 { 2511 int num_pipes = intel_bigjoiner_num_pipes(crtc_state); 2512 2513 if (num_pipes < 2) 2514 return; 2515 2516 mode->crtc_clock /= num_pipes; 2517 mode->crtc_hdisplay /= num_pipes; 2518 mode->crtc_hblank_start /= num_pipes; 2519 mode->crtc_hblank_end /= num_pipes; 2520 mode->crtc_hsync_start /= num_pipes; 2521 mode->crtc_hsync_end /= num_pipes; 2522 mode->crtc_htotal /= num_pipes; 2523 } 2524 2525 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state, 2526 struct drm_display_mode *mode) 2527 { 2528 int overlap = crtc_state->splitter.pixel_overlap; 2529 int n = crtc_state->splitter.link_count; 2530 2531 if (!crtc_state->splitter.enable) 2532 return; 2533 2534 /* 2535 * eDP MSO uses segment timings from EDID for transcoder 2536 * timings, but full mode for everything else. 2537 * 2538 * h_full = (h_segment - pixel_overlap) * link_count 2539 */ 2540 mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n; 2541 mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n; 2542 mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n; 2543 mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n; 2544 mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n; 2545 mode->crtc_htotal = (mode->crtc_htotal - overlap) * n; 2546 mode->crtc_clock *= n; 2547 } 2548 2549 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state) 2550 { 2551 struct drm_display_mode *mode = &crtc_state->hw.mode; 2552 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 2553 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2554 2555 /* 2556 * Start with the adjusted_mode crtc timings, which 2557 * have been filled with the transcoder timings. 2558 */ 2559 drm_mode_copy(pipe_mode, adjusted_mode); 2560 2561 /* Expand MSO per-segment transcoder timings to full */ 2562 intel_splitter_adjust_timings(crtc_state, pipe_mode); 2563 2564 /* 2565 * We want the full numbers in adjusted_mode normal timings, 2566 * adjusted_mode crtc timings are left with the raw transcoder 2567 * timings. 2568 */ 2569 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode); 2570 2571 /* Populate the "user" mode with full numbers */ 2572 drm_mode_copy(mode, pipe_mode); 2573 intel_mode_from_crtc_timings(mode, mode); 2574 mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) * 2575 (intel_bigjoiner_num_pipes(crtc_state) ?: 1); 2576 mode->vdisplay = drm_rect_height(&crtc_state->pipe_src); 2577 2578 /* Derive per-pipe timings in case bigjoiner is used */ 2579 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode); 2580 intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 2581 2582 intel_crtc_compute_pixel_rate(crtc_state); 2583 } 2584 2585 void intel_encoder_get_config(struct intel_encoder *encoder, 2586 struct intel_crtc_state *crtc_state) 2587 { 2588 encoder->get_config(encoder, crtc_state); 2589 2590 intel_crtc_readout_derived_state(crtc_state); 2591 } 2592 2593 static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state) 2594 { 2595 int num_pipes = intel_bigjoiner_num_pipes(crtc_state); 2596 int width, height; 2597 2598 if (num_pipes < 2) 2599 return; 2600 2601 width = drm_rect_width(&crtc_state->pipe_src); 2602 height = drm_rect_height(&crtc_state->pipe_src); 2603 2604 drm_rect_init(&crtc_state->pipe_src, 0, 0, 2605 width / num_pipes, height); 2606 } 2607 2608 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state) 2609 { 2610 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2611 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2612 2613 intel_bigjoiner_compute_pipe_src(crtc_state); 2614 2615 /* 2616 * Pipe horizontal size must be even in: 2617 * - DVO ganged mode 2618 * - LVDS dual channel mode 2619 * - Double wide pipe 2620 */ 2621 if (drm_rect_width(&crtc_state->pipe_src) & 1) { 2622 if (crtc_state->double_wide) { 2623 drm_dbg_kms(&i915->drm, 2624 "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n", 2625 crtc->base.base.id, crtc->base.name); 2626 return -EINVAL; 2627 } 2628 2629 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 2630 intel_is_dual_link_lvds(i915)) { 2631 drm_dbg_kms(&i915->drm, 2632 "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n", 2633 crtc->base.base.id, crtc->base.name); 2634 return -EINVAL; 2635 } 2636 } 2637 2638 return 0; 2639 } 2640 2641 static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) 2642 { 2643 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2644 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2645 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2646 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 2647 int clock_limit = i915->max_dotclk_freq; 2648 2649 /* 2650 * Start with the adjusted_mode crtc timings, which 2651 * have been filled with the transcoder timings. 2652 */ 2653 drm_mode_copy(pipe_mode, adjusted_mode); 2654 2655 /* Expand MSO per-segment transcoder timings to full */ 2656 intel_splitter_adjust_timings(crtc_state, pipe_mode); 2657 2658 /* Derive per-pipe timings in case bigjoiner is used */ 2659 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode); 2660 intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 2661 2662 if (DISPLAY_VER(i915) < 4) { 2663 clock_limit = i915->max_cdclk_freq * 9 / 10; 2664 2665 /* 2666 * Enable double wide mode when the dot clock 2667 * is > 90% of the (display) core speed. 2668 */ 2669 if (intel_crtc_supports_double_wide(crtc) && 2670 pipe_mode->crtc_clock > clock_limit) { 2671 clock_limit = i915->max_dotclk_freq; 2672 crtc_state->double_wide = true; 2673 } 2674 } 2675 2676 if (pipe_mode->crtc_clock > clock_limit) { 2677 drm_dbg_kms(&i915->drm, 2678 "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 2679 crtc->base.base.id, crtc->base.name, 2680 pipe_mode->crtc_clock, clock_limit, 2681 str_yes_no(crtc_state->double_wide)); 2682 return -EINVAL; 2683 } 2684 2685 return 0; 2686 } 2687 2688 static int intel_crtc_compute_config(struct intel_atomic_state *state, 2689 struct intel_crtc *crtc) 2690 { 2691 struct intel_crtc_state *crtc_state = 2692 intel_atomic_get_new_crtc_state(state, crtc); 2693 int ret; 2694 2695 ret = intel_crtc_compute_pipe_src(crtc_state); 2696 if (ret) 2697 return ret; 2698 2699 ret = intel_crtc_compute_pipe_mode(crtc_state); 2700 if (ret) 2701 return ret; 2702 2703 intel_crtc_compute_pixel_rate(crtc_state); 2704 2705 if (crtc_state->has_pch_encoder) 2706 return ilk_fdi_compute_config(crtc, crtc_state); 2707 2708 return 0; 2709 } 2710 2711 static void 2712 intel_reduce_m_n_ratio(u32 *num, u32 *den) 2713 { 2714 while (*num > DATA_LINK_M_N_MASK || 2715 *den > DATA_LINK_M_N_MASK) { 2716 *num >>= 1; 2717 *den >>= 1; 2718 } 2719 } 2720 2721 static void compute_m_n(unsigned int m, unsigned int n, 2722 u32 *ret_m, u32 *ret_n, 2723 bool constant_n) 2724 { 2725 /* 2726 * Several DP dongles in particular seem to be fussy about 2727 * too large link M/N values. Give N value as 0x8000 that 2728 * should be acceptable by specific devices. 0x8000 is the 2729 * specified fixed N value for asynchronous clock mode, 2730 * which the devices expect also in synchronous clock mode. 2731 */ 2732 if (constant_n) 2733 *ret_n = DP_LINK_CONSTANT_N_VALUE; 2734 else 2735 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 2736 2737 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 2738 intel_reduce_m_n_ratio(ret_m, ret_n); 2739 } 2740 2741 void 2742 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 2743 int pixel_clock, int link_clock, 2744 struct intel_link_m_n *m_n, 2745 bool constant_n, bool fec_enable) 2746 { 2747 u32 data_clock = bits_per_pixel * pixel_clock; 2748 2749 if (fec_enable) 2750 data_clock = intel_dp_mode_to_fec_clock(data_clock); 2751 2752 m_n->tu = 64; 2753 compute_m_n(data_clock, 2754 link_clock * nlanes * 8, 2755 &m_n->data_m, &m_n->data_n, 2756 constant_n); 2757 2758 compute_m_n(pixel_clock, link_clock, 2759 &m_n->link_m, &m_n->link_n, 2760 constant_n); 2761 } 2762 2763 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) 2764 { 2765 /* 2766 * There may be no VBT; and if the BIOS enabled SSC we can 2767 * just keep using it to avoid unnecessary flicker. Whereas if the 2768 * BIOS isn't using it, don't assume it will work even if the VBT 2769 * indicates as much. 2770 */ 2771 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 2772 bool bios_lvds_use_ssc = intel_de_read(dev_priv, 2773 PCH_DREF_CONTROL) & 2774 DREF_SSC1_ENABLE; 2775 2776 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 2777 drm_dbg_kms(&dev_priv->drm, 2778 "SSC %s by BIOS, overriding VBT which says %s\n", 2779 str_enabled_disabled(bios_lvds_use_ssc), 2780 str_enabled_disabled(dev_priv->vbt.lvds_use_ssc)); 2781 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 2782 } 2783 } 2784 } 2785 2786 void intel_zero_m_n(struct intel_link_m_n *m_n) 2787 { 2788 /* corresponds to 0 register value */ 2789 memset(m_n, 0, sizeof(*m_n)); 2790 m_n->tu = 1; 2791 } 2792 2793 void intel_set_m_n(struct drm_i915_private *i915, 2794 const struct intel_link_m_n *m_n, 2795 i915_reg_t data_m_reg, i915_reg_t data_n_reg, 2796 i915_reg_t link_m_reg, i915_reg_t link_n_reg) 2797 { 2798 intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m); 2799 intel_de_write(i915, data_n_reg, m_n->data_n); 2800 intel_de_write(i915, link_m_reg, m_n->link_m); 2801 /* 2802 * On BDW+ writing LINK_N arms the double buffered update 2803 * of all the M/N registers, so it must be written last. 2804 */ 2805 intel_de_write(i915, link_n_reg, m_n->link_n); 2806 } 2807 2808 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 2809 enum transcoder transcoder) 2810 { 2811 if (IS_HASWELL(dev_priv)) 2812 return transcoder == TRANSCODER_EDP; 2813 2814 return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv); 2815 } 2816 2817 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, 2818 enum transcoder transcoder, 2819 const struct intel_link_m_n *m_n) 2820 { 2821 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2822 enum pipe pipe = crtc->pipe; 2823 2824 if (DISPLAY_VER(dev_priv) >= 5) 2825 intel_set_m_n(dev_priv, m_n, 2826 PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), 2827 PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); 2828 else 2829 intel_set_m_n(dev_priv, m_n, 2830 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 2831 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 2832 } 2833 2834 void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, 2835 enum transcoder transcoder, 2836 const struct intel_link_m_n *m_n) 2837 { 2838 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2839 2840 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) 2841 return; 2842 2843 intel_set_m_n(dev_priv, m_n, 2844 PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), 2845 PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); 2846 } 2847 2848 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) 2849 { 2850 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2851 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2852 enum pipe pipe = crtc->pipe; 2853 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2854 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2855 u32 crtc_vtotal, crtc_vblank_end; 2856 int vsyncshift = 0; 2857 2858 /* We need to be careful not to changed the adjusted mode, for otherwise 2859 * the hw state checker will get angry at the mismatch. */ 2860 crtc_vtotal = adjusted_mode->crtc_vtotal; 2861 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 2862 2863 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 2864 /* the chip adds 2 halflines automatically */ 2865 crtc_vtotal -= 1; 2866 crtc_vblank_end -= 1; 2867 2868 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 2869 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 2870 else 2871 vsyncshift = adjusted_mode->crtc_hsync_start - 2872 adjusted_mode->crtc_htotal / 2; 2873 if (vsyncshift < 0) 2874 vsyncshift += adjusted_mode->crtc_htotal; 2875 } 2876 2877 if (DISPLAY_VER(dev_priv) > 3) 2878 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder), 2879 vsyncshift); 2880 2881 intel_de_write(dev_priv, HTOTAL(cpu_transcoder), 2882 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); 2883 intel_de_write(dev_priv, HBLANK(cpu_transcoder), 2884 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); 2885 intel_de_write(dev_priv, HSYNC(cpu_transcoder), 2886 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); 2887 2888 intel_de_write(dev_priv, VTOTAL(cpu_transcoder), 2889 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16)); 2890 intel_de_write(dev_priv, VBLANK(cpu_transcoder), 2891 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16)); 2892 intel_de_write(dev_priv, VSYNC(cpu_transcoder), 2893 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); 2894 2895 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 2896 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 2897 * documented on the DDI_FUNC_CTL register description, EDP Input Select 2898 * bits. */ 2899 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 2900 (pipe == PIPE_B || pipe == PIPE_C)) 2901 intel_de_write(dev_priv, VTOTAL(pipe), 2902 intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); 2903 2904 } 2905 2906 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 2907 { 2908 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2909 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2910 int width = drm_rect_width(&crtc_state->pipe_src); 2911 int height = drm_rect_height(&crtc_state->pipe_src); 2912 enum pipe pipe = crtc->pipe; 2913 2914 /* pipesrc controls the size that is scaled from, which should 2915 * always be the user's requested size. 2916 */ 2917 intel_de_write(dev_priv, PIPESRC(pipe), 2918 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1)); 2919 } 2920 2921 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 2922 { 2923 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2924 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2925 2926 if (DISPLAY_VER(dev_priv) == 2) 2927 return false; 2928 2929 if (DISPLAY_VER(dev_priv) >= 9 || 2930 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2931 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; 2932 else 2933 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; 2934 } 2935 2936 static void intel_get_transcoder_timings(struct intel_crtc *crtc, 2937 struct intel_crtc_state *pipe_config) 2938 { 2939 struct drm_device *dev = crtc->base.dev; 2940 struct drm_i915_private *dev_priv = to_i915(dev); 2941 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 2942 u32 tmp; 2943 2944 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder)); 2945 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 2946 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 2947 2948 if (!transcoder_is_dsi(cpu_transcoder)) { 2949 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder)); 2950 pipe_config->hw.adjusted_mode.crtc_hblank_start = 2951 (tmp & 0xffff) + 1; 2952 pipe_config->hw.adjusted_mode.crtc_hblank_end = 2953 ((tmp >> 16) & 0xffff) + 1; 2954 } 2955 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder)); 2956 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 2957 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 2958 2959 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder)); 2960 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 2961 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 2962 2963 if (!transcoder_is_dsi(cpu_transcoder)) { 2964 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder)); 2965 pipe_config->hw.adjusted_mode.crtc_vblank_start = 2966 (tmp & 0xffff) + 1; 2967 pipe_config->hw.adjusted_mode.crtc_vblank_end = 2968 ((tmp >> 16) & 0xffff) + 1; 2969 } 2970 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder)); 2971 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 2972 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 2973 2974 if (intel_pipe_is_interlaced(pipe_config)) { 2975 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 2976 pipe_config->hw.adjusted_mode.crtc_vtotal += 1; 2977 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1; 2978 } 2979 } 2980 2981 static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) 2982 { 2983 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2984 int num_pipes = intel_bigjoiner_num_pipes(crtc_state); 2985 enum pipe master_pipe, pipe = crtc->pipe; 2986 int width; 2987 2988 if (num_pipes < 2) 2989 return; 2990 2991 master_pipe = bigjoiner_master_pipe(crtc_state); 2992 width = drm_rect_width(&crtc_state->pipe_src); 2993 2994 drm_rect_translate_to(&crtc_state->pipe_src, 2995 (pipe - master_pipe) * width, 0); 2996 } 2997 2998 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 2999 struct intel_crtc_state *pipe_config) 3000 { 3001 struct drm_device *dev = crtc->base.dev; 3002 struct drm_i915_private *dev_priv = to_i915(dev); 3003 u32 tmp; 3004 3005 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe)); 3006 3007 drm_rect_init(&pipe_config->pipe_src, 0, 0, 3008 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1, 3009 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1); 3010 3011 intel_bigjoiner_adjust_pipe_src(pipe_config); 3012 } 3013 3014 void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 3015 { 3016 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3017 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3018 u32 pipeconf = 0; 3019 3020 /* 3021 * - We keep both pipes enabled on 830 3022 * - During modeset the pipe is still disabled and must remain so 3023 * - During fastset the pipe is already enabled and must remain so 3024 */ 3025 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state)) 3026 pipeconf |= PIPECONF_ENABLE; 3027 3028 if (crtc_state->double_wide) 3029 pipeconf |= PIPECONF_DOUBLE_WIDE; 3030 3031 /* only g4x and later have fancy bpc/dither controls */ 3032 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 3033 IS_CHERRYVIEW(dev_priv)) { 3034 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 3035 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 3036 pipeconf |= PIPECONF_DITHER_EN | 3037 PIPECONF_DITHER_TYPE_SP; 3038 3039 switch (crtc_state->pipe_bpp) { 3040 default: 3041 /* Case prevented by intel_choose_pipe_bpp_dither. */ 3042 MISSING_CASE(crtc_state->pipe_bpp); 3043 fallthrough; 3044 case 18: 3045 pipeconf |= PIPECONF_BPC_6; 3046 break; 3047 case 24: 3048 pipeconf |= PIPECONF_BPC_8; 3049 break; 3050 case 30: 3051 pipeconf |= PIPECONF_BPC_10; 3052 break; 3053 } 3054 } 3055 3056 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 3057 if (DISPLAY_VER(dev_priv) < 4 || 3058 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3059 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 3060 else 3061 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 3062 } else { 3063 pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE; 3064 } 3065 3066 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3067 crtc_state->limited_color_range) 3068 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 3069 3070 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 3071 3072 pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3073 3074 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf); 3075 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe)); 3076 } 3077 3078 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 3079 { 3080 if (IS_I830(dev_priv)) 3081 return false; 3082 3083 return DISPLAY_VER(dev_priv) >= 4 || 3084 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 3085 } 3086 3087 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) 3088 { 3089 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3090 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3091 u32 tmp; 3092 3093 if (!i9xx_has_pfit(dev_priv)) 3094 return; 3095 3096 tmp = intel_de_read(dev_priv, PFIT_CONTROL); 3097 if (!(tmp & PFIT_ENABLE)) 3098 return; 3099 3100 /* Check whether the pfit is attached to our pipe. */ 3101 if (DISPLAY_VER(dev_priv) < 4) { 3102 if (crtc->pipe != PIPE_B) 3103 return; 3104 } else { 3105 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 3106 return; 3107 } 3108 3109 crtc_state->gmch_pfit.control = tmp; 3110 crtc_state->gmch_pfit.pgm_ratios = 3111 intel_de_read(dev_priv, PFIT_PGM_RATIOS); 3112 } 3113 3114 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 3115 struct intel_crtc_state *pipe_config) 3116 { 3117 struct drm_device *dev = crtc->base.dev; 3118 struct drm_i915_private *dev_priv = to_i915(dev); 3119 enum pipe pipe = crtc->pipe; 3120 struct dpll clock; 3121 u32 mdiv; 3122 int refclk = 100000; 3123 3124 /* In case of DSI, DPLL will not be used */ 3125 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 3126 return; 3127 3128 vlv_dpio_get(dev_priv); 3129 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 3130 vlv_dpio_put(dev_priv); 3131 3132 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 3133 clock.m2 = mdiv & DPIO_M2DIV_MASK; 3134 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 3135 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 3136 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 3137 3138 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 3139 } 3140 3141 static void chv_crtc_clock_get(struct intel_crtc *crtc, 3142 struct intel_crtc_state *pipe_config) 3143 { 3144 struct drm_device *dev = crtc->base.dev; 3145 struct drm_i915_private *dev_priv = to_i915(dev); 3146 enum pipe pipe = crtc->pipe; 3147 enum dpio_channel port = vlv_pipe_to_channel(pipe); 3148 struct dpll clock; 3149 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 3150 int refclk = 100000; 3151 3152 /* In case of DSI, DPLL will not be used */ 3153 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 3154 return; 3155 3156 vlv_dpio_get(dev_priv); 3157 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 3158 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 3159 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 3160 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 3161 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 3162 vlv_dpio_put(dev_priv); 3163 3164 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 3165 clock.m2 = (pll_dw0 & 0xff) << 22; 3166 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 3167 clock.m2 |= pll_dw2 & 0x3fffff; 3168 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 3169 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 3170 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 3171 3172 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 3173 } 3174 3175 static enum intel_output_format 3176 bdw_get_pipemisc_output_format(struct intel_crtc *crtc) 3177 { 3178 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3179 u32 tmp; 3180 3181 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); 3182 3183 if (tmp & PIPEMISC_YUV420_ENABLE) { 3184 /* We support 4:2:0 in full blend mode only */ 3185 drm_WARN_ON(&dev_priv->drm, 3186 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); 3187 3188 return INTEL_OUTPUT_FORMAT_YCBCR420; 3189 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { 3190 return INTEL_OUTPUT_FORMAT_YCBCR444; 3191 } else { 3192 return INTEL_OUTPUT_FORMAT_RGB; 3193 } 3194 } 3195 3196 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) 3197 { 3198 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3199 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 3200 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3201 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3202 u32 tmp; 3203 3204 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); 3205 3206 if (tmp & DISP_PIPE_GAMMA_ENABLE) 3207 crtc_state->gamma_enable = true; 3208 3209 if (!HAS_GMCH(dev_priv) && 3210 tmp & DISP_PIPE_CSC_ENABLE) 3211 crtc_state->csc_enable = true; 3212 } 3213 3214 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 3215 struct intel_crtc_state *pipe_config) 3216 { 3217 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3218 enum intel_display_power_domain power_domain; 3219 intel_wakeref_t wakeref; 3220 u32 tmp; 3221 bool ret; 3222 3223 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3224 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 3225 if (!wakeref) 3226 return false; 3227 3228 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3229 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 3230 pipe_config->shared_dpll = NULL; 3231 3232 ret = false; 3233 3234 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); 3235 if (!(tmp & PIPECONF_ENABLE)) 3236 goto out; 3237 3238 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 3239 IS_CHERRYVIEW(dev_priv)) { 3240 switch (tmp & PIPECONF_BPC_MASK) { 3241 case PIPECONF_BPC_6: 3242 pipe_config->pipe_bpp = 18; 3243 break; 3244 case PIPECONF_BPC_8: 3245 pipe_config->pipe_bpp = 24; 3246 break; 3247 case PIPECONF_BPC_10: 3248 pipe_config->pipe_bpp = 30; 3249 break; 3250 default: 3251 MISSING_CASE(tmp); 3252 break; 3253 } 3254 } 3255 3256 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3257 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 3258 pipe_config->limited_color_range = true; 3259 3260 pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp); 3261 3262 pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1; 3263 3264 if (IS_CHERRYVIEW(dev_priv)) 3265 pipe_config->cgm_mode = intel_de_read(dev_priv, 3266 CGM_PIPE_MODE(crtc->pipe)); 3267 3268 i9xx_get_pipe_color_config(pipe_config); 3269 intel_color_get_config(pipe_config); 3270 3271 if (DISPLAY_VER(dev_priv) < 4) 3272 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 3273 3274 intel_get_transcoder_timings(crtc, pipe_config); 3275 intel_get_pipe_src_size(crtc, pipe_config); 3276 3277 i9xx_get_pfit_config(pipe_config); 3278 3279 if (DISPLAY_VER(dev_priv) >= 4) { 3280 /* No way to read it out on pipes B and C */ 3281 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 3282 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 3283 else 3284 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe)); 3285 pipe_config->pixel_multiplier = 3286 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 3287 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 3288 pipe_config->dpll_hw_state.dpll_md = tmp; 3289 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 3290 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 3291 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe)); 3292 pipe_config->pixel_multiplier = 3293 ((tmp & SDVO_MULTIPLIER_MASK) 3294 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 3295 } else { 3296 /* Note that on i915G/GM the pixel multiplier is in the sdvo 3297 * port and will be fixed up in the encoder->get_config 3298 * function. */ 3299 pipe_config->pixel_multiplier = 1; 3300 } 3301 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv, 3302 DPLL(crtc->pipe)); 3303 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 3304 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv, 3305 FP0(crtc->pipe)); 3306 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv, 3307 FP1(crtc->pipe)); 3308 } else { 3309 /* Mask out read-only status bits. */ 3310 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 3311 DPLL_PORTC_READY_MASK | 3312 DPLL_PORTB_READY_MASK); 3313 } 3314 3315 if (IS_CHERRYVIEW(dev_priv)) 3316 chv_crtc_clock_get(crtc, pipe_config); 3317 else if (IS_VALLEYVIEW(dev_priv)) 3318 vlv_crtc_clock_get(crtc, pipe_config); 3319 else 3320 i9xx_crtc_clock_get(crtc, pipe_config); 3321 3322 /* 3323 * Normally the dotclock is filled in by the encoder .get_config() 3324 * but in case the pipe is enabled w/o any ports we need a sane 3325 * default. 3326 */ 3327 pipe_config->hw.adjusted_mode.crtc_clock = 3328 pipe_config->port_clock / pipe_config->pixel_multiplier; 3329 3330 ret = true; 3331 3332 out: 3333 intel_display_power_put(dev_priv, power_domain, wakeref); 3334 3335 return ret; 3336 } 3337 3338 void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 3339 { 3340 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3341 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3342 enum pipe pipe = crtc->pipe; 3343 u32 val = 0; 3344 3345 /* 3346 * - During modeset the pipe is still disabled and must remain so 3347 * - During fastset the pipe is already enabled and must remain so 3348 */ 3349 if (!intel_crtc_needs_modeset(crtc_state)) 3350 val |= PIPECONF_ENABLE; 3351 3352 switch (crtc_state->pipe_bpp) { 3353 default: 3354 /* Case prevented by intel_choose_pipe_bpp_dither. */ 3355 MISSING_CASE(crtc_state->pipe_bpp); 3356 fallthrough; 3357 case 18: 3358 val |= PIPECONF_BPC_6; 3359 break; 3360 case 24: 3361 val |= PIPECONF_BPC_8; 3362 break; 3363 case 30: 3364 val |= PIPECONF_BPC_10; 3365 break; 3366 case 36: 3367 val |= PIPECONF_BPC_12; 3368 break; 3369 } 3370 3371 if (crtc_state->dither) 3372 val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; 3373 3374 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3375 val |= PIPECONF_INTERLACE_IF_ID_ILK; 3376 else 3377 val |= PIPECONF_INTERLACE_PF_PD_ILK; 3378 3379 /* 3380 * This would end up with an odd purple hue over 3381 * the entire display. Make sure we don't do it. 3382 */ 3383 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && 3384 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 3385 3386 if (crtc_state->limited_color_range && 3387 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3388 val |= PIPECONF_COLOR_RANGE_SELECT; 3389 3390 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3391 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; 3392 3393 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 3394 3395 val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3396 val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); 3397 3398 intel_de_write(dev_priv, PIPECONF(pipe), val); 3399 intel_de_posting_read(dev_priv, PIPECONF(pipe)); 3400 } 3401 3402 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) 3403 { 3404 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3405 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3406 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3407 u32 val = 0; 3408 3409 /* 3410 * - During modeset the pipe is still disabled and must remain so 3411 * - During fastset the pipe is already enabled and must remain so 3412 */ 3413 if (!intel_crtc_needs_modeset(crtc_state)) 3414 val |= PIPECONF_ENABLE; 3415 3416 if (IS_HASWELL(dev_priv) && crtc_state->dither) 3417 val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; 3418 3419 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3420 val |= PIPECONF_INTERLACE_IF_ID_ILK; 3421 else 3422 val |= PIPECONF_INTERLACE_PF_PD_ILK; 3423 3424 if (IS_HASWELL(dev_priv) && 3425 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3426 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; 3427 3428 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); 3429 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder)); 3430 } 3431 3432 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) 3433 { 3434 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3435 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3436 u32 val = 0; 3437 3438 switch (crtc_state->pipe_bpp) { 3439 case 18: 3440 val |= PIPEMISC_BPC_6; 3441 break; 3442 case 24: 3443 val |= PIPEMISC_BPC_8; 3444 break; 3445 case 30: 3446 val |= PIPEMISC_BPC_10; 3447 break; 3448 case 36: 3449 /* Port output 12BPC defined for ADLP+ */ 3450 if (DISPLAY_VER(dev_priv) > 12) 3451 val |= PIPEMISC_BPC_12_ADLP; 3452 break; 3453 default: 3454 MISSING_CASE(crtc_state->pipe_bpp); 3455 break; 3456 } 3457 3458 if (crtc_state->dither) 3459 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 3460 3461 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 3462 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 3463 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; 3464 3465 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 3466 val |= PIPEMISC_YUV420_ENABLE | 3467 PIPEMISC_YUV420_MODE_FULL_BLEND; 3468 3469 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state)) 3470 val |= PIPEMISC_HDR_MODE_PRECISION; 3471 3472 if (DISPLAY_VER(dev_priv) >= 12) 3473 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC; 3474 3475 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val); 3476 } 3477 3478 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) 3479 { 3480 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3481 u32 tmp; 3482 3483 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); 3484 3485 switch (tmp & PIPEMISC_BPC_MASK) { 3486 case PIPEMISC_BPC_6: 3487 return 18; 3488 case PIPEMISC_BPC_8: 3489 return 24; 3490 case PIPEMISC_BPC_10: 3491 return 30; 3492 /* 3493 * PORT OUTPUT 12 BPC defined for ADLP+. 3494 * 3495 * TODO: 3496 * For previous platforms with DSI interface, bits 5:7 3497 * are used for storing pipe_bpp irrespective of dithering. 3498 * Since the value of 12 BPC is not defined for these bits 3499 * on older platforms, need to find a workaround for 12 BPC 3500 * MIPI DSI HW readout. 3501 */ 3502 case PIPEMISC_BPC_12_ADLP: 3503 if (DISPLAY_VER(dev_priv) > 12) 3504 return 36; 3505 fallthrough; 3506 default: 3507 MISSING_CASE(tmp); 3508 return 0; 3509 } 3510 } 3511 3512 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) 3513 { 3514 /* 3515 * Account for spread spectrum to avoid 3516 * oversubscribing the link. Max center spread 3517 * is 2.5%; use 5% for safety's sake. 3518 */ 3519 u32 bps = target_clock * bpp * 21 / 20; 3520 return DIV_ROUND_UP(bps, link_bw * 8); 3521 } 3522 3523 void intel_get_m_n(struct drm_i915_private *i915, 3524 struct intel_link_m_n *m_n, 3525 i915_reg_t data_m_reg, i915_reg_t data_n_reg, 3526 i915_reg_t link_m_reg, i915_reg_t link_n_reg) 3527 { 3528 m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK; 3529 m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK; 3530 m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK; 3531 m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK; 3532 m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; 3533 } 3534 3535 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, 3536 enum transcoder transcoder, 3537 struct intel_link_m_n *m_n) 3538 { 3539 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3540 enum pipe pipe = crtc->pipe; 3541 3542 if (DISPLAY_VER(dev_priv) >= 5) 3543 intel_get_m_n(dev_priv, m_n, 3544 PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), 3545 PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); 3546 else 3547 intel_get_m_n(dev_priv, m_n, 3548 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 3549 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 3550 } 3551 3552 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, 3553 enum transcoder transcoder, 3554 struct intel_link_m_n *m_n) 3555 { 3556 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3557 3558 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) 3559 return; 3560 3561 intel_get_m_n(dev_priv, m_n, 3562 PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), 3563 PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); 3564 } 3565 3566 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state, 3567 u32 pos, u32 size) 3568 { 3569 drm_rect_init(&crtc_state->pch_pfit.dst, 3570 pos >> 16, pos & 0xffff, 3571 size >> 16, size & 0xffff); 3572 } 3573 3574 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state) 3575 { 3576 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3577 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3578 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; 3579 int id = -1; 3580 int i; 3581 3582 /* find scaler attached to this pipe */ 3583 for (i = 0; i < crtc->num_scalers; i++) { 3584 u32 ctl, pos, size; 3585 3586 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i)); 3587 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN) 3588 continue; 3589 3590 id = i; 3591 crtc_state->pch_pfit.enabled = true; 3592 3593 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i)); 3594 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i)); 3595 3596 ilk_get_pfit_pos_size(crtc_state, pos, size); 3597 3598 scaler_state->scalers[i].in_use = true; 3599 break; 3600 } 3601 3602 scaler_state->scaler_id = id; 3603 if (id >= 0) 3604 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 3605 else 3606 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 3607 } 3608 3609 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) 3610 { 3611 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3612 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3613 u32 ctl, pos, size; 3614 3615 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); 3616 if ((ctl & PF_ENABLE) == 0) 3617 return; 3618 3619 crtc_state->pch_pfit.enabled = true; 3620 3621 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe)); 3622 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe)); 3623 3624 ilk_get_pfit_pos_size(crtc_state, pos, size); 3625 3626 /* 3627 * We currently do not free assignements of panel fitters on 3628 * ivb/hsw (since we don't use the higher upscaling modes which 3629 * differentiates them) so just WARN about this case for now. 3630 */ 3631 drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 && 3632 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe)); 3633 } 3634 3635 static bool ilk_get_pipe_config(struct intel_crtc *crtc, 3636 struct intel_crtc_state *pipe_config) 3637 { 3638 struct drm_device *dev = crtc->base.dev; 3639 struct drm_i915_private *dev_priv = to_i915(dev); 3640 enum intel_display_power_domain power_domain; 3641 intel_wakeref_t wakeref; 3642 u32 tmp; 3643 bool ret; 3644 3645 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 3646 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 3647 if (!wakeref) 3648 return false; 3649 3650 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 3651 pipe_config->shared_dpll = NULL; 3652 3653 ret = false; 3654 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); 3655 if (!(tmp & PIPECONF_ENABLE)) 3656 goto out; 3657 3658 switch (tmp & PIPECONF_BPC_MASK) { 3659 case PIPECONF_BPC_6: 3660 pipe_config->pipe_bpp = 18; 3661 break; 3662 case PIPECONF_BPC_8: 3663 pipe_config->pipe_bpp = 24; 3664 break; 3665 case PIPECONF_BPC_10: 3666 pipe_config->pipe_bpp = 30; 3667 break; 3668 case PIPECONF_BPC_12: 3669 pipe_config->pipe_bpp = 36; 3670 break; 3671 default: 3672 break; 3673 } 3674 3675 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 3676 pipe_config->limited_color_range = true; 3677 3678 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { 3679 case PIPECONF_OUTPUT_COLORSPACE_YUV601: 3680 case PIPECONF_OUTPUT_COLORSPACE_YUV709: 3681 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 3682 break; 3683 default: 3684 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3685 break; 3686 } 3687 3688 pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp); 3689 3690 pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1; 3691 3692 pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp); 3693 3694 pipe_config->csc_mode = intel_de_read(dev_priv, 3695 PIPE_CSC_MODE(crtc->pipe)); 3696 3697 i9xx_get_pipe_color_config(pipe_config); 3698 intel_color_get_config(pipe_config); 3699 3700 pipe_config->pixel_multiplier = 1; 3701 3702 ilk_pch_get_config(pipe_config); 3703 3704 intel_get_transcoder_timings(crtc, pipe_config); 3705 intel_get_pipe_src_size(crtc, pipe_config); 3706 3707 ilk_get_pfit_config(pipe_config); 3708 3709 ret = true; 3710 3711 out: 3712 intel_display_power_put(dev_priv, power_domain, wakeref); 3713 3714 return ret; 3715 } 3716 3717 static u8 bigjoiner_pipes(struct drm_i915_private *i915) 3718 { 3719 if (DISPLAY_VER(i915) >= 12) 3720 return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); 3721 else if (DISPLAY_VER(i915) >= 11) 3722 return BIT(PIPE_B) | BIT(PIPE_C); 3723 else 3724 return 0; 3725 } 3726 3727 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, 3728 enum transcoder cpu_transcoder) 3729 { 3730 enum intel_display_power_domain power_domain; 3731 intel_wakeref_t wakeref; 3732 u32 tmp = 0; 3733 3734 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3735 3736 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 3737 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); 3738 3739 return tmp & TRANS_DDI_FUNC_ENABLE; 3740 } 3741 3742 static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv, 3743 u8 *master_pipes, u8 *slave_pipes) 3744 { 3745 struct intel_crtc *crtc; 3746 3747 *master_pipes = 0; 3748 *slave_pipes = 0; 3749 3750 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, 3751 bigjoiner_pipes(dev_priv)) { 3752 enum intel_display_power_domain power_domain; 3753 enum pipe pipe = crtc->pipe; 3754 intel_wakeref_t wakeref; 3755 3756 power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); 3757 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 3758 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 3759 3760 if (!(tmp & BIG_JOINER_ENABLE)) 3761 continue; 3762 3763 if (tmp & MASTER_BIG_JOINER_ENABLE) 3764 *master_pipes |= BIT(pipe); 3765 else 3766 *slave_pipes |= BIT(pipe); 3767 } 3768 3769 if (DISPLAY_VER(dev_priv) < 13) 3770 continue; 3771 3772 power_domain = POWER_DOMAIN_PIPE(pipe); 3773 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 3774 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 3775 3776 if (tmp & UNCOMPRESSED_JOINER_MASTER) 3777 *master_pipes |= BIT(pipe); 3778 if (tmp & UNCOMPRESSED_JOINER_SLAVE) 3779 *slave_pipes |= BIT(pipe); 3780 } 3781 } 3782 3783 /* Bigjoiner pipes should always be consecutive master and slave */ 3784 drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1, 3785 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n", 3786 *master_pipes, *slave_pipes); 3787 } 3788 3789 static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes) 3790 { 3791 if ((slave_pipes & BIT(pipe)) == 0) 3792 return pipe; 3793 3794 /* ignore everything above our pipe */ 3795 master_pipes &= ~GENMASK(7, pipe); 3796 3797 /* highest remaining bit should be our master pipe */ 3798 return fls(master_pipes) - 1; 3799 } 3800 3801 static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes) 3802 { 3803 enum pipe master_pipe, next_master_pipe; 3804 3805 master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes); 3806 3807 if ((master_pipes & BIT(master_pipe)) == 0) 3808 return 0; 3809 3810 /* ignore our master pipe and everything below it */ 3811 master_pipes &= ~GENMASK(master_pipe, 0); 3812 /* make sure a high bit is set for the ffs() */ 3813 master_pipes |= BIT(7); 3814 /* lowest remaining bit should be the next master pipe */ 3815 next_master_pipe = ffs(master_pipes) - 1; 3816 3817 return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe); 3818 } 3819 3820 static u8 hsw_panel_transcoders(struct drm_i915_private *i915) 3821 { 3822 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP); 3823 3824 if (DISPLAY_VER(i915) >= 11) 3825 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 3826 3827 return panel_transcoder_mask; 3828 } 3829 3830 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) 3831 { 3832 struct drm_device *dev = crtc->base.dev; 3833 struct drm_i915_private *dev_priv = to_i915(dev); 3834 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); 3835 enum transcoder cpu_transcoder; 3836 u8 master_pipes, slave_pipes; 3837 u8 enabled_transcoders = 0; 3838 3839 /* 3840 * XXX: Do intel_display_power_get_if_enabled before reading this (for 3841 * consistency and less surprising code; it's in always on power). 3842 */ 3843 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, 3844 panel_transcoder_mask) { 3845 enum intel_display_power_domain power_domain; 3846 intel_wakeref_t wakeref; 3847 enum pipe trans_pipe; 3848 u32 tmp = 0; 3849 3850 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 3851 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 3852 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); 3853 3854 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 3855 continue; 3856 3857 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 3858 default: 3859 drm_WARN(dev, 1, 3860 "unknown pipe linked to transcoder %s\n", 3861 transcoder_name(cpu_transcoder)); 3862 fallthrough; 3863 case TRANS_DDI_EDP_INPUT_A_ONOFF: 3864 case TRANS_DDI_EDP_INPUT_A_ON: 3865 trans_pipe = PIPE_A; 3866 break; 3867 case TRANS_DDI_EDP_INPUT_B_ONOFF: 3868 trans_pipe = PIPE_B; 3869 break; 3870 case TRANS_DDI_EDP_INPUT_C_ONOFF: 3871 trans_pipe = PIPE_C; 3872 break; 3873 case TRANS_DDI_EDP_INPUT_D_ONOFF: 3874 trans_pipe = PIPE_D; 3875 break; 3876 } 3877 3878 if (trans_pipe == crtc->pipe) 3879 enabled_transcoders |= BIT(cpu_transcoder); 3880 } 3881 3882 /* single pipe or bigjoiner master */ 3883 cpu_transcoder = (enum transcoder) crtc->pipe; 3884 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 3885 enabled_transcoders |= BIT(cpu_transcoder); 3886 3887 /* bigjoiner slave -> consider the master pipe's transcoder as well */ 3888 enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes); 3889 if (slave_pipes & BIT(crtc->pipe)) { 3890 cpu_transcoder = (enum transcoder) 3891 get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes); 3892 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 3893 enabled_transcoders |= BIT(cpu_transcoder); 3894 } 3895 3896 return enabled_transcoders; 3897 } 3898 3899 static bool has_edp_transcoders(u8 enabled_transcoders) 3900 { 3901 return enabled_transcoders & BIT(TRANSCODER_EDP); 3902 } 3903 3904 static bool has_dsi_transcoders(u8 enabled_transcoders) 3905 { 3906 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) | 3907 BIT(TRANSCODER_DSI_1)); 3908 } 3909 3910 static bool has_pipe_transcoders(u8 enabled_transcoders) 3911 { 3912 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) | 3913 BIT(TRANSCODER_DSI_0) | 3914 BIT(TRANSCODER_DSI_1)); 3915 } 3916 3917 static void assert_enabled_transcoders(struct drm_i915_private *i915, 3918 u8 enabled_transcoders) 3919 { 3920 /* Only one type of transcoder please */ 3921 drm_WARN_ON(&i915->drm, 3922 has_edp_transcoders(enabled_transcoders) + 3923 has_dsi_transcoders(enabled_transcoders) + 3924 has_pipe_transcoders(enabled_transcoders) > 1); 3925 3926 /* Only DSI transcoders can be ganged */ 3927 drm_WARN_ON(&i915->drm, 3928 !has_dsi_transcoders(enabled_transcoders) && 3929 !is_power_of_2(enabled_transcoders)); 3930 } 3931 3932 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 3933 struct intel_crtc_state *pipe_config, 3934 struct intel_display_power_domain_set *power_domain_set) 3935 { 3936 struct drm_device *dev = crtc->base.dev; 3937 struct drm_i915_private *dev_priv = to_i915(dev); 3938 unsigned long enabled_transcoders; 3939 u32 tmp; 3940 3941 enabled_transcoders = hsw_enabled_transcoders(crtc); 3942 if (!enabled_transcoders) 3943 return false; 3944 3945 assert_enabled_transcoders(dev_priv, enabled_transcoders); 3946 3947 /* 3948 * With the exception of DSI we should only ever have 3949 * a single enabled transcoder. With DSI let's just 3950 * pick the first one. 3951 */ 3952 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1; 3953 3954 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 3955 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 3956 return false; 3957 3958 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) { 3959 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 3960 3961 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF) 3962 pipe_config->pch_pfit.force_thru = true; 3963 } 3964 3965 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder)); 3966 3967 return tmp & PIPECONF_ENABLE; 3968 } 3969 3970 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 3971 struct intel_crtc_state *pipe_config, 3972 struct intel_display_power_domain_set *power_domain_set) 3973 { 3974 struct drm_device *dev = crtc->base.dev; 3975 struct drm_i915_private *dev_priv = to_i915(dev); 3976 enum transcoder cpu_transcoder; 3977 enum port port; 3978 u32 tmp; 3979 3980 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 3981 if (port == PORT_A) 3982 cpu_transcoder = TRANSCODER_DSI_A; 3983 else 3984 cpu_transcoder = TRANSCODER_DSI_C; 3985 3986 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 3987 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) 3988 continue; 3989 3990 /* 3991 * The PLL needs to be enabled with a valid divider 3992 * configuration, otherwise accessing DSI registers will hang 3993 * the machine. See BSpec North Display Engine 3994 * registers/MIPI[BXT]. We can break out here early, since we 3995 * need the same DSI PLL to be enabled for both DSI ports. 3996 */ 3997 if (!bxt_dsi_pll_is_enabled(dev_priv)) 3998 break; 3999 4000 /* XXX: this works for video mode only */ 4001 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); 4002 if (!(tmp & DPI_ENABLE)) 4003 continue; 4004 4005 tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); 4006 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 4007 continue; 4008 4009 pipe_config->cpu_transcoder = cpu_transcoder; 4010 break; 4011 } 4012 4013 return transcoder_is_dsi(pipe_config->cpu_transcoder); 4014 } 4015 4016 static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state) 4017 { 4018 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4019 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4020 u8 master_pipes, slave_pipes; 4021 enum pipe pipe = crtc->pipe; 4022 4023 enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes); 4024 4025 if (((master_pipes | slave_pipes) & BIT(pipe)) == 0) 4026 return; 4027 4028 crtc_state->bigjoiner_pipes = 4029 BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) | 4030 get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes); 4031 } 4032 4033 static bool hsw_get_pipe_config(struct intel_crtc *crtc, 4034 struct intel_crtc_state *pipe_config) 4035 { 4036 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4037 struct intel_display_power_domain_set power_domain_set = { }; 4038 bool active; 4039 u32 tmp; 4040 4041 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set, 4042 POWER_DOMAIN_PIPE(crtc->pipe))) 4043 return false; 4044 4045 pipe_config->shared_dpll = NULL; 4046 4047 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set); 4048 4049 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4050 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) { 4051 drm_WARN_ON(&dev_priv->drm, active); 4052 active = true; 4053 } 4054 4055 if (!active) 4056 goto out; 4057 4058 intel_dsc_get_config(pipe_config); 4059 intel_bigjoiner_get_config(pipe_config); 4060 4061 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 4062 DISPLAY_VER(dev_priv) >= 11) 4063 intel_get_transcoder_timings(crtc, pipe_config); 4064 4065 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) 4066 intel_vrr_get_config(crtc, pipe_config); 4067 4068 intel_get_pipe_src_size(crtc, pipe_config); 4069 4070 if (IS_HASWELL(dev_priv)) { 4071 u32 tmp = intel_de_read(dev_priv, 4072 PIPECONF(pipe_config->cpu_transcoder)); 4073 4074 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) 4075 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 4076 else 4077 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 4078 } else { 4079 pipe_config->output_format = 4080 bdw_get_pipemisc_output_format(crtc); 4081 } 4082 4083 pipe_config->gamma_mode = intel_de_read(dev_priv, 4084 GAMMA_MODE(crtc->pipe)); 4085 4086 pipe_config->csc_mode = intel_de_read(dev_priv, 4087 PIPE_CSC_MODE(crtc->pipe)); 4088 4089 if (DISPLAY_VER(dev_priv) >= 9) { 4090 tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe)); 4091 4092 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) 4093 pipe_config->gamma_enable = true; 4094 4095 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) 4096 pipe_config->csc_enable = true; 4097 } else { 4098 i9xx_get_pipe_color_config(pipe_config); 4099 } 4100 4101 intel_color_get_config(pipe_config); 4102 4103 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); 4104 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); 4105 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 4106 pipe_config->ips_linetime = 4107 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); 4108 4109 if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set, 4110 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { 4111 if (DISPLAY_VER(dev_priv) >= 9) 4112 skl_get_pfit_config(pipe_config); 4113 else 4114 ilk_get_pfit_config(pipe_config); 4115 } 4116 4117 hsw_ips_get_config(pipe_config); 4118 4119 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 4120 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 4121 pipe_config->pixel_multiplier = 4122 intel_de_read(dev_priv, 4123 PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 4124 } else { 4125 pipe_config->pixel_multiplier = 1; 4126 } 4127 4128 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 4129 tmp = intel_de_read(dev_priv, CHICKEN_TRANS(pipe_config->cpu_transcoder)); 4130 4131 pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; 4132 } else { 4133 /* no idea if this is correct */ 4134 pipe_config->framestart_delay = 1; 4135 } 4136 4137 out: 4138 intel_display_power_put_all_in_set(dev_priv, &power_domain_set); 4139 4140 return active; 4141 } 4142 4143 bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) 4144 { 4145 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4146 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4147 4148 if (!i915->display->get_pipe_config(crtc, crtc_state)) 4149 return false; 4150 4151 crtc_state->hw.active = true; 4152 4153 intel_crtc_readout_derived_state(crtc_state); 4154 4155 return true; 4156 } 4157 4158 /* VESA 640x480x72Hz mode to set on the pipe */ 4159 static const struct drm_display_mode load_detect_mode = { 4160 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 4161 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 4162 }; 4163 4164 static int intel_modeset_disable_planes(struct drm_atomic_state *state, 4165 struct drm_crtc *crtc) 4166 { 4167 struct drm_plane *plane; 4168 struct drm_plane_state *plane_state; 4169 int ret, i; 4170 4171 ret = drm_atomic_add_affected_planes(state, crtc); 4172 if (ret) 4173 return ret; 4174 4175 for_each_new_plane_in_state(state, plane, plane_state, i) { 4176 if (plane_state->crtc != crtc) 4177 continue; 4178 4179 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 4180 if (ret) 4181 return ret; 4182 4183 drm_atomic_set_fb_for_plane(plane_state, NULL); 4184 } 4185 4186 return 0; 4187 } 4188 4189 int intel_get_load_detect_pipe(struct drm_connector *connector, 4190 struct intel_load_detect_pipe *old, 4191 struct drm_modeset_acquire_ctx *ctx) 4192 { 4193 struct intel_encoder *encoder = 4194 intel_attached_encoder(to_intel_connector(connector)); 4195 struct intel_crtc *possible_crtc; 4196 struct intel_crtc *crtc = NULL; 4197 struct drm_device *dev = encoder->base.dev; 4198 struct drm_i915_private *dev_priv = to_i915(dev); 4199 struct drm_mode_config *config = &dev->mode_config; 4200 struct drm_atomic_state *state = NULL, *restore_state = NULL; 4201 struct drm_connector_state *connector_state; 4202 struct intel_crtc_state *crtc_state; 4203 int ret; 4204 4205 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 4206 connector->base.id, connector->name, 4207 encoder->base.base.id, encoder->base.name); 4208 4209 old->restore_state = NULL; 4210 4211 drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex)); 4212 4213 /* 4214 * Algorithm gets a little messy: 4215 * 4216 * - if the connector already has an assigned crtc, use it (but make 4217 * sure it's on first) 4218 * 4219 * - try to find the first unused crtc that can drive this connector, 4220 * and use that if we find one 4221 */ 4222 4223 /* See if we already have a CRTC for this connector */ 4224 if (connector->state->crtc) { 4225 crtc = to_intel_crtc(connector->state->crtc); 4226 4227 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 4228 if (ret) 4229 goto fail; 4230 4231 /* Make sure the crtc and connector are running */ 4232 goto found; 4233 } 4234 4235 /* Find an unused one (if possible) */ 4236 for_each_intel_crtc(dev, possible_crtc) { 4237 if (!(encoder->base.possible_crtcs & 4238 drm_crtc_mask(&possible_crtc->base))) 4239 continue; 4240 4241 ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx); 4242 if (ret) 4243 goto fail; 4244 4245 if (possible_crtc->base.state->enable) { 4246 drm_modeset_unlock(&possible_crtc->base.mutex); 4247 continue; 4248 } 4249 4250 crtc = possible_crtc; 4251 break; 4252 } 4253 4254 /* 4255 * If we didn't find an unused CRTC, don't use any. 4256 */ 4257 if (!crtc) { 4258 drm_dbg_kms(&dev_priv->drm, 4259 "no pipe available for load-detect\n"); 4260 ret = -ENODEV; 4261 goto fail; 4262 } 4263 4264 found: 4265 state = drm_atomic_state_alloc(dev); 4266 restore_state = drm_atomic_state_alloc(dev); 4267 if (!state || !restore_state) { 4268 ret = -ENOMEM; 4269 goto fail; 4270 } 4271 4272 state->acquire_ctx = ctx; 4273 restore_state->acquire_ctx = ctx; 4274 4275 connector_state = drm_atomic_get_connector_state(state, connector); 4276 if (IS_ERR(connector_state)) { 4277 ret = PTR_ERR(connector_state); 4278 goto fail; 4279 } 4280 4281 ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base); 4282 if (ret) 4283 goto fail; 4284 4285 crtc_state = intel_atomic_get_crtc_state(state, crtc); 4286 if (IS_ERR(crtc_state)) { 4287 ret = PTR_ERR(crtc_state); 4288 goto fail; 4289 } 4290 4291 crtc_state->uapi.active = true; 4292 4293 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi, 4294 &load_detect_mode); 4295 if (ret) 4296 goto fail; 4297 4298 ret = intel_modeset_disable_planes(state, &crtc->base); 4299 if (ret) 4300 goto fail; 4301 4302 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 4303 if (!ret) 4304 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base)); 4305 if (!ret) 4306 ret = drm_atomic_add_affected_planes(restore_state, &crtc->base); 4307 if (ret) { 4308 drm_dbg_kms(&dev_priv->drm, 4309 "Failed to create a copy of old state to restore: %i\n", 4310 ret); 4311 goto fail; 4312 } 4313 4314 ret = drm_atomic_commit(state); 4315 if (ret) { 4316 drm_dbg_kms(&dev_priv->drm, 4317 "failed to set mode on load-detect pipe\n"); 4318 goto fail; 4319 } 4320 4321 old->restore_state = restore_state; 4322 drm_atomic_state_put(state); 4323 4324 /* let the connector get through one full cycle before testing */ 4325 intel_crtc_wait_for_next_vblank(crtc); 4326 4327 return true; 4328 4329 fail: 4330 if (state) { 4331 drm_atomic_state_put(state); 4332 state = NULL; 4333 } 4334 if (restore_state) { 4335 drm_atomic_state_put(restore_state); 4336 restore_state = NULL; 4337 } 4338 4339 if (ret == -EDEADLK) 4340 return ret; 4341 4342 return false; 4343 } 4344 4345 void intel_release_load_detect_pipe(struct drm_connector *connector, 4346 struct intel_load_detect_pipe *old, 4347 struct drm_modeset_acquire_ctx *ctx) 4348 { 4349 struct intel_encoder *intel_encoder = 4350 intel_attached_encoder(to_intel_connector(connector)); 4351 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev); 4352 struct drm_encoder *encoder = &intel_encoder->base; 4353 struct drm_atomic_state *state = old->restore_state; 4354 int ret; 4355 4356 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 4357 connector->base.id, connector->name, 4358 encoder->base.id, encoder->name); 4359 4360 if (!state) 4361 return; 4362 4363 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 4364 if (ret) 4365 drm_dbg_kms(&i915->drm, 4366 "Couldn't release load detect pipe: %i\n", ret); 4367 drm_atomic_state_put(state); 4368 } 4369 4370 static int i9xx_pll_refclk(struct drm_device *dev, 4371 const struct intel_crtc_state *pipe_config) 4372 { 4373 struct drm_i915_private *dev_priv = to_i915(dev); 4374 u32 dpll = pipe_config->dpll_hw_state.dpll; 4375 4376 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 4377 return dev_priv->vbt.lvds_ssc_freq; 4378 else if (HAS_PCH_SPLIT(dev_priv)) 4379 return 120000; 4380 else if (DISPLAY_VER(dev_priv) != 2) 4381 return 96000; 4382 else 4383 return 48000; 4384 } 4385 4386 /* Returns the clock of the currently programmed mode of the given pipe. */ 4387 void i9xx_crtc_clock_get(struct intel_crtc *crtc, 4388 struct intel_crtc_state *pipe_config) 4389 { 4390 struct drm_device *dev = crtc->base.dev; 4391 struct drm_i915_private *dev_priv = to_i915(dev); 4392 u32 dpll = pipe_config->dpll_hw_state.dpll; 4393 u32 fp; 4394 struct dpll clock; 4395 int port_clock; 4396 int refclk = i9xx_pll_refclk(dev, pipe_config); 4397 4398 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 4399 fp = pipe_config->dpll_hw_state.fp0; 4400 else 4401 fp = pipe_config->dpll_hw_state.fp1; 4402 4403 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 4404 if (IS_PINEVIEW(dev_priv)) { 4405 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 4406 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 4407 } else { 4408 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 4409 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 4410 } 4411 4412 if (DISPLAY_VER(dev_priv) != 2) { 4413 if (IS_PINEVIEW(dev_priv)) 4414 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 4415 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 4416 else 4417 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 4418 DPLL_FPA01_P1_POST_DIV_SHIFT); 4419 4420 switch (dpll & DPLL_MODE_MASK) { 4421 case DPLLB_MODE_DAC_SERIAL: 4422 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 4423 5 : 10; 4424 break; 4425 case DPLLB_MODE_LVDS: 4426 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 4427 7 : 14; 4428 break; 4429 default: 4430 drm_dbg_kms(&dev_priv->drm, 4431 "Unknown DPLL mode %08x in programmed " 4432 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 4433 return; 4434 } 4435 4436 if (IS_PINEVIEW(dev_priv)) 4437 port_clock = pnv_calc_dpll_params(refclk, &clock); 4438 else 4439 port_clock = i9xx_calc_dpll_params(refclk, &clock); 4440 } else { 4441 enum pipe lvds_pipe; 4442 4443 if (IS_I85X(dev_priv) && 4444 intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) && 4445 lvds_pipe == crtc->pipe) { 4446 u32 lvds = intel_de_read(dev_priv, LVDS); 4447 4448 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 4449 DPLL_FPA01_P1_POST_DIV_SHIFT); 4450 4451 if (lvds & LVDS_CLKB_POWER_UP) 4452 clock.p2 = 7; 4453 else 4454 clock.p2 = 14; 4455 } else { 4456 if (dpll & PLL_P1_DIVIDE_BY_TWO) 4457 clock.p1 = 2; 4458 else { 4459 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 4460 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 4461 } 4462 if (dpll & PLL_P2_DIVIDE_BY_4) 4463 clock.p2 = 4; 4464 else 4465 clock.p2 = 2; 4466 } 4467 4468 port_clock = i9xx_calc_dpll_params(refclk, &clock); 4469 } 4470 4471 /* 4472 * This value includes pixel_multiplier. We will use 4473 * port_clock to compute adjusted_mode.crtc_clock in the 4474 * encoder's get_config() function. 4475 */ 4476 pipe_config->port_clock = port_clock; 4477 } 4478 4479 int intel_dotclock_calculate(int link_freq, 4480 const struct intel_link_m_n *m_n) 4481 { 4482 /* 4483 * The calculation for the data clock is: 4484 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 4485 * But we want to avoid losing precison if possible, so: 4486 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 4487 * 4488 * and the link clock is simpler: 4489 * link_clock = (m * link_clock) / n 4490 */ 4491 4492 if (!m_n->link_n) 4493 return 0; 4494 4495 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); 4496 } 4497 4498 /* Returns the currently programmed mode of the given encoder. */ 4499 struct drm_display_mode * 4500 intel_encoder_current_mode(struct intel_encoder *encoder) 4501 { 4502 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4503 struct intel_crtc_state *crtc_state; 4504 struct drm_display_mode *mode; 4505 struct intel_crtc *crtc; 4506 enum pipe pipe; 4507 4508 if (!encoder->get_hw_state(encoder, &pipe)) 4509 return NULL; 4510 4511 crtc = intel_crtc_for_pipe(dev_priv, pipe); 4512 4513 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 4514 if (!mode) 4515 return NULL; 4516 4517 crtc_state = intel_crtc_state_alloc(crtc); 4518 if (!crtc_state) { 4519 kfree(mode); 4520 return NULL; 4521 } 4522 4523 if (!intel_crtc_get_pipe_config(crtc_state)) { 4524 kfree(crtc_state); 4525 kfree(mode); 4526 return NULL; 4527 } 4528 4529 intel_encoder_get_config(encoder, crtc_state); 4530 4531 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode); 4532 4533 kfree(crtc_state); 4534 4535 return mode; 4536 } 4537 4538 static bool encoders_cloneable(const struct intel_encoder *a, 4539 const struct intel_encoder *b) 4540 { 4541 /* masks could be asymmetric, so check both ways */ 4542 return a == b || (a->cloneable & (1 << b->type) && 4543 b->cloneable & (1 << a->type)); 4544 } 4545 4546 static bool check_single_encoder_cloning(struct intel_atomic_state *state, 4547 struct intel_crtc *crtc, 4548 struct intel_encoder *encoder) 4549 { 4550 struct intel_encoder *source_encoder; 4551 struct drm_connector *connector; 4552 struct drm_connector_state *connector_state; 4553 int i; 4554 4555 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4556 if (connector_state->crtc != &crtc->base) 4557 continue; 4558 4559 source_encoder = 4560 to_intel_encoder(connector_state->best_encoder); 4561 if (!encoders_cloneable(encoder, source_encoder)) 4562 return false; 4563 } 4564 4565 return true; 4566 } 4567 4568 static int icl_add_linked_planes(struct intel_atomic_state *state) 4569 { 4570 struct intel_plane *plane, *linked; 4571 struct intel_plane_state *plane_state, *linked_plane_state; 4572 int i; 4573 4574 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4575 linked = plane_state->planar_linked_plane; 4576 4577 if (!linked) 4578 continue; 4579 4580 linked_plane_state = intel_atomic_get_plane_state(state, linked); 4581 if (IS_ERR(linked_plane_state)) 4582 return PTR_ERR(linked_plane_state); 4583 4584 drm_WARN_ON(state->base.dev, 4585 linked_plane_state->planar_linked_plane != plane); 4586 drm_WARN_ON(state->base.dev, 4587 linked_plane_state->planar_slave == plane_state->planar_slave); 4588 } 4589 4590 return 0; 4591 } 4592 4593 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) 4594 { 4595 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4596 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4597 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 4598 struct intel_plane *plane, *linked; 4599 struct intel_plane_state *plane_state; 4600 int i; 4601 4602 if (DISPLAY_VER(dev_priv) < 11) 4603 return 0; 4604 4605 /* 4606 * Destroy all old plane links and make the slave plane invisible 4607 * in the crtc_state->active_planes mask. 4608 */ 4609 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4610 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 4611 continue; 4612 4613 plane_state->planar_linked_plane = NULL; 4614 if (plane_state->planar_slave && !plane_state->uapi.visible) { 4615 crtc_state->enabled_planes &= ~BIT(plane->id); 4616 crtc_state->active_planes &= ~BIT(plane->id); 4617 crtc_state->update_planes |= BIT(plane->id); 4618 crtc_state->data_rate[plane->id] = 0; 4619 crtc_state->rel_data_rate[plane->id] = 0; 4620 } 4621 4622 plane_state->planar_slave = false; 4623 } 4624 4625 if (!crtc_state->nv12_planes) 4626 return 0; 4627 4628 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4629 struct intel_plane_state *linked_state = NULL; 4630 4631 if (plane->pipe != crtc->pipe || 4632 !(crtc_state->nv12_planes & BIT(plane->id))) 4633 continue; 4634 4635 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 4636 if (!icl_is_nv12_y_plane(dev_priv, linked->id)) 4637 continue; 4638 4639 if (crtc_state->active_planes & BIT(linked->id)) 4640 continue; 4641 4642 linked_state = intel_atomic_get_plane_state(state, linked); 4643 if (IS_ERR(linked_state)) 4644 return PTR_ERR(linked_state); 4645 4646 break; 4647 } 4648 4649 if (!linked_state) { 4650 drm_dbg_kms(&dev_priv->drm, 4651 "Need %d free Y planes for planar YUV\n", 4652 hweight8(crtc_state->nv12_planes)); 4653 4654 return -EINVAL; 4655 } 4656 4657 plane_state->planar_linked_plane = linked; 4658 4659 linked_state->planar_slave = true; 4660 linked_state->planar_linked_plane = plane; 4661 crtc_state->enabled_planes |= BIT(linked->id); 4662 crtc_state->active_planes |= BIT(linked->id); 4663 crtc_state->update_planes |= BIT(linked->id); 4664 crtc_state->data_rate[linked->id] = 4665 crtc_state->data_rate_y[plane->id]; 4666 crtc_state->rel_data_rate[linked->id] = 4667 crtc_state->rel_data_rate_y[plane->id]; 4668 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", 4669 linked->base.name, plane->base.name); 4670 4671 /* Copy parameters to slave plane */ 4672 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 4673 linked_state->color_ctl = plane_state->color_ctl; 4674 linked_state->view = plane_state->view; 4675 linked_state->decrypt = plane_state->decrypt; 4676 4677 intel_plane_copy_hw_state(linked_state, plane_state); 4678 linked_state->uapi.src = plane_state->uapi.src; 4679 linked_state->uapi.dst = plane_state->uapi.dst; 4680 4681 if (icl_is_hdr_plane(dev_priv, plane->id)) { 4682 if (linked->id == PLANE_SPRITE5) 4683 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL; 4684 else if (linked->id == PLANE_SPRITE4) 4685 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL; 4686 else if (linked->id == PLANE_SPRITE3) 4687 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL; 4688 else if (linked->id == PLANE_SPRITE2) 4689 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL; 4690 else 4691 MISSING_CASE(linked->id); 4692 } 4693 } 4694 4695 return 0; 4696 } 4697 4698 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 4699 { 4700 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 4701 struct intel_atomic_state *state = 4702 to_intel_atomic_state(new_crtc_state->uapi.state); 4703 const struct intel_crtc_state *old_crtc_state = 4704 intel_atomic_get_old_crtc_state(state, crtc); 4705 4706 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 4707 } 4708 4709 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) 4710 { 4711 const struct drm_display_mode *pipe_mode = 4712 &crtc_state->hw.pipe_mode; 4713 int linetime_wm; 4714 4715 if (!crtc_state->hw.enable) 4716 return 0; 4717 4718 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4719 pipe_mode->crtc_clock); 4720 4721 return min(linetime_wm, 0x1ff); 4722 } 4723 4724 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, 4725 const struct intel_cdclk_state *cdclk_state) 4726 { 4727 const struct drm_display_mode *pipe_mode = 4728 &crtc_state->hw.pipe_mode; 4729 int linetime_wm; 4730 4731 if (!crtc_state->hw.enable) 4732 return 0; 4733 4734 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4735 cdclk_state->logical.cdclk); 4736 4737 return min(linetime_wm, 0x1ff); 4738 } 4739 4740 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) 4741 { 4742 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4743 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4744 const struct drm_display_mode *pipe_mode = 4745 &crtc_state->hw.pipe_mode; 4746 int linetime_wm; 4747 4748 if (!crtc_state->hw.enable) 4749 return 0; 4750 4751 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8, 4752 crtc_state->pixel_rate); 4753 4754 /* Display WA #1135: BXT:ALL GLK:ALL */ 4755 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 4756 dev_priv->ipc_enabled) 4757 linetime_wm /= 2; 4758 4759 return min(linetime_wm, 0x1ff); 4760 } 4761 4762 static int hsw_compute_linetime_wm(struct intel_atomic_state *state, 4763 struct intel_crtc *crtc) 4764 { 4765 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4766 struct intel_crtc_state *crtc_state = 4767 intel_atomic_get_new_crtc_state(state, crtc); 4768 const struct intel_cdclk_state *cdclk_state; 4769 4770 if (DISPLAY_VER(dev_priv) >= 9) 4771 crtc_state->linetime = skl_linetime_wm(crtc_state); 4772 else 4773 crtc_state->linetime = hsw_linetime_wm(crtc_state); 4774 4775 if (!hsw_crtc_supports_ips(crtc)) 4776 return 0; 4777 4778 cdclk_state = intel_atomic_get_cdclk_state(state); 4779 if (IS_ERR(cdclk_state)) 4780 return PTR_ERR(cdclk_state); 4781 4782 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, 4783 cdclk_state); 4784 4785 return 0; 4786 } 4787 4788 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 4789 struct intel_crtc *crtc) 4790 { 4791 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4792 struct intel_crtc_state *crtc_state = 4793 intel_atomic_get_new_crtc_state(state, crtc); 4794 bool mode_changed = intel_crtc_needs_modeset(crtc_state); 4795 int ret; 4796 4797 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) && 4798 mode_changed && !crtc_state->hw.active) 4799 crtc_state->update_wm_post = true; 4800 4801 if (mode_changed) { 4802 ret = intel_dpll_crtc_compute_clock(state, crtc); 4803 if (ret) 4804 return ret; 4805 4806 ret = intel_dpll_crtc_get_shared_dpll(state, crtc); 4807 if (ret) 4808 return ret; 4809 } 4810 4811 /* 4812 * May need to update pipe gamma enable bits 4813 * when C8 planes are getting enabled/disabled. 4814 */ 4815 if (c8_planes_changed(crtc_state)) 4816 crtc_state->uapi.color_mgmt_changed = true; 4817 4818 if (mode_changed || crtc_state->update_pipe || 4819 crtc_state->uapi.color_mgmt_changed) { 4820 ret = intel_color_check(crtc_state); 4821 if (ret) 4822 return ret; 4823 } 4824 4825 ret = intel_compute_pipe_wm(state, crtc); 4826 if (ret) { 4827 drm_dbg_kms(&dev_priv->drm, 4828 "Target pipe watermarks are invalid\n"); 4829 return ret; 4830 } 4831 4832 /* 4833 * Calculate 'intermediate' watermarks that satisfy both the 4834 * old state and the new state. We can program these 4835 * immediately. 4836 */ 4837 ret = intel_compute_intermediate_wm(state, crtc); 4838 if (ret) { 4839 drm_dbg_kms(&dev_priv->drm, 4840 "No valid intermediate pipe watermarks are possible\n"); 4841 return ret; 4842 } 4843 4844 if (DISPLAY_VER(dev_priv) >= 9) { 4845 if (mode_changed || crtc_state->update_pipe) { 4846 ret = skl_update_scaler_crtc(crtc_state); 4847 if (ret) 4848 return ret; 4849 } 4850 4851 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state); 4852 if (ret) 4853 return ret; 4854 } 4855 4856 if (HAS_IPS(dev_priv)) { 4857 ret = hsw_ips_compute_config(state, crtc); 4858 if (ret) 4859 return ret; 4860 } 4861 4862 if (DISPLAY_VER(dev_priv) >= 9 || 4863 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 4864 ret = hsw_compute_linetime_wm(state, crtc); 4865 if (ret) 4866 return ret; 4867 4868 } 4869 4870 ret = intel_psr2_sel_fetch_update(state, crtc); 4871 if (ret) 4872 return ret; 4873 4874 return 0; 4875 } 4876 4877 static int 4878 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 4879 struct intel_crtc_state *crtc_state) 4880 { 4881 struct drm_connector *connector = conn_state->connector; 4882 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 4883 const struct drm_display_info *info = &connector->display_info; 4884 int bpp; 4885 4886 switch (conn_state->max_bpc) { 4887 case 6 ... 7: 4888 bpp = 6 * 3; 4889 break; 4890 case 8 ... 9: 4891 bpp = 8 * 3; 4892 break; 4893 case 10 ... 11: 4894 bpp = 10 * 3; 4895 break; 4896 case 12 ... 16: 4897 bpp = 12 * 3; 4898 break; 4899 default: 4900 MISSING_CASE(conn_state->max_bpc); 4901 return -EINVAL; 4902 } 4903 4904 if (bpp < crtc_state->pipe_bpp) { 4905 drm_dbg_kms(&i915->drm, 4906 "[CONNECTOR:%d:%s] Limiting display bpp to %d " 4907 "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n", 4908 connector->base.id, connector->name, 4909 bpp, 3 * info->bpc, 4910 3 * conn_state->max_requested_bpc, 4911 crtc_state->pipe_bpp); 4912 4913 crtc_state->pipe_bpp = bpp; 4914 } 4915 4916 return 0; 4917 } 4918 4919 static int 4920 compute_baseline_pipe_bpp(struct intel_atomic_state *state, 4921 struct intel_crtc *crtc) 4922 { 4923 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4924 struct intel_crtc_state *crtc_state = 4925 intel_atomic_get_new_crtc_state(state, crtc); 4926 struct drm_connector *connector; 4927 struct drm_connector_state *connector_state; 4928 int bpp, i; 4929 4930 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 4931 IS_CHERRYVIEW(dev_priv))) 4932 bpp = 10*3; 4933 else if (DISPLAY_VER(dev_priv) >= 5) 4934 bpp = 12*3; 4935 else 4936 bpp = 8*3; 4937 4938 crtc_state->pipe_bpp = bpp; 4939 4940 /* Clamp display bpp to connector max bpp */ 4941 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4942 int ret; 4943 4944 if (connector_state->crtc != &crtc->base) 4945 continue; 4946 4947 ret = compute_sink_pipe_bpp(connector_state, crtc_state); 4948 if (ret) 4949 return ret; 4950 } 4951 4952 return 0; 4953 } 4954 4955 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 4956 { 4957 struct drm_device *dev = state->base.dev; 4958 struct drm_connector *connector; 4959 struct drm_connector_list_iter conn_iter; 4960 unsigned int used_ports = 0; 4961 unsigned int used_mst_ports = 0; 4962 bool ret = true; 4963 4964 /* 4965 * We're going to peek into connector->state, 4966 * hence connection_mutex must be held. 4967 */ 4968 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 4969 4970 /* 4971 * Walk the connector list instead of the encoder 4972 * list to detect the problem on ddi platforms 4973 * where there's just one encoder per digital port. 4974 */ 4975 drm_connector_list_iter_begin(dev, &conn_iter); 4976 drm_for_each_connector_iter(connector, &conn_iter) { 4977 struct drm_connector_state *connector_state; 4978 struct intel_encoder *encoder; 4979 4980 connector_state = 4981 drm_atomic_get_new_connector_state(&state->base, 4982 connector); 4983 if (!connector_state) 4984 connector_state = connector->state; 4985 4986 if (!connector_state->best_encoder) 4987 continue; 4988 4989 encoder = to_intel_encoder(connector_state->best_encoder); 4990 4991 drm_WARN_ON(dev, !connector_state->crtc); 4992 4993 switch (encoder->type) { 4994 case INTEL_OUTPUT_DDI: 4995 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) 4996 break; 4997 fallthrough; 4998 case INTEL_OUTPUT_DP: 4999 case INTEL_OUTPUT_HDMI: 5000 case INTEL_OUTPUT_EDP: 5001 /* the same port mustn't appear more than once */ 5002 if (used_ports & BIT(encoder->port)) 5003 ret = false; 5004 5005 used_ports |= BIT(encoder->port); 5006 break; 5007 case INTEL_OUTPUT_DP_MST: 5008 used_mst_ports |= 5009 1 << encoder->port; 5010 break; 5011 default: 5012 break; 5013 } 5014 } 5015 drm_connector_list_iter_end(&conn_iter); 5016 5017 /* can't mix MST and SST/HDMI on the same port */ 5018 if (used_ports & used_mst_ports) 5019 return false; 5020 5021 return ret; 5022 } 5023 5024 static void 5025 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, 5026 struct intel_crtc *crtc) 5027 { 5028 struct intel_crtc_state *crtc_state = 5029 intel_atomic_get_new_crtc_state(state, crtc); 5030 5031 WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); 5032 5033 drm_property_replace_blob(&crtc_state->hw.degamma_lut, 5034 crtc_state->uapi.degamma_lut); 5035 drm_property_replace_blob(&crtc_state->hw.gamma_lut, 5036 crtc_state->uapi.gamma_lut); 5037 drm_property_replace_blob(&crtc_state->hw.ctm, 5038 crtc_state->uapi.ctm); 5039 } 5040 5041 static void 5042 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state, 5043 struct intel_crtc *crtc) 5044 { 5045 struct intel_crtc_state *crtc_state = 5046 intel_atomic_get_new_crtc_state(state, crtc); 5047 5048 WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); 5049 5050 crtc_state->hw.enable = crtc_state->uapi.enable; 5051 crtc_state->hw.active = crtc_state->uapi.active; 5052 drm_mode_copy(&crtc_state->hw.mode, 5053 &crtc_state->uapi.mode); 5054 drm_mode_copy(&crtc_state->hw.adjusted_mode, 5055 &crtc_state->uapi.adjusted_mode); 5056 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; 5057 5058 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 5059 } 5060 5061 static void 5062 copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state, 5063 struct intel_crtc *slave_crtc) 5064 { 5065 struct intel_crtc_state *slave_crtc_state = 5066 intel_atomic_get_new_crtc_state(state, slave_crtc); 5067 struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state); 5068 const struct intel_crtc_state *master_crtc_state = 5069 intel_atomic_get_new_crtc_state(state, master_crtc); 5070 5071 drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut, 5072 master_crtc_state->hw.degamma_lut); 5073 drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut, 5074 master_crtc_state->hw.gamma_lut); 5075 drm_property_replace_blob(&slave_crtc_state->hw.ctm, 5076 master_crtc_state->hw.ctm); 5077 5078 slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed; 5079 } 5080 5081 static int 5082 copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state, 5083 struct intel_crtc *slave_crtc) 5084 { 5085 struct intel_crtc_state *slave_crtc_state = 5086 intel_atomic_get_new_crtc_state(state, slave_crtc); 5087 struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state); 5088 const struct intel_crtc_state *master_crtc_state = 5089 intel_atomic_get_new_crtc_state(state, master_crtc); 5090 struct intel_crtc_state *saved_state; 5091 5092 WARN_ON(master_crtc_state->bigjoiner_pipes != 5093 slave_crtc_state->bigjoiner_pipes); 5094 5095 saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL); 5096 if (!saved_state) 5097 return -ENOMEM; 5098 5099 /* preserve some things from the slave's original crtc state */ 5100 saved_state->uapi = slave_crtc_state->uapi; 5101 saved_state->scaler_state = slave_crtc_state->scaler_state; 5102 saved_state->shared_dpll = slave_crtc_state->shared_dpll; 5103 saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state; 5104 saved_state->crc_enabled = slave_crtc_state->crc_enabled; 5105 5106 intel_crtc_free_hw_state(slave_crtc_state); 5107 memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state)); 5108 kfree(saved_state); 5109 5110 /* Re-init hw state */ 5111 memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw)); 5112 slave_crtc_state->hw.enable = master_crtc_state->hw.enable; 5113 slave_crtc_state->hw.active = master_crtc_state->hw.active; 5114 drm_mode_copy(&slave_crtc_state->hw.mode, 5115 &master_crtc_state->hw.mode); 5116 drm_mode_copy(&slave_crtc_state->hw.pipe_mode, 5117 &master_crtc_state->hw.pipe_mode); 5118 drm_mode_copy(&slave_crtc_state->hw.adjusted_mode, 5119 &master_crtc_state->hw.adjusted_mode); 5120 slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter; 5121 5122 copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc); 5123 5124 slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed; 5125 slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed; 5126 slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed; 5127 5128 WARN_ON(master_crtc_state->bigjoiner_pipes != 5129 slave_crtc_state->bigjoiner_pipes); 5130 5131 return 0; 5132 } 5133 5134 static int 5135 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, 5136 struct intel_crtc *crtc) 5137 { 5138 struct intel_crtc_state *crtc_state = 5139 intel_atomic_get_new_crtc_state(state, crtc); 5140 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5141 struct intel_crtc_state *saved_state; 5142 5143 saved_state = intel_crtc_state_alloc(crtc); 5144 if (!saved_state) 5145 return -ENOMEM; 5146 5147 /* free the old crtc_state->hw members */ 5148 intel_crtc_free_hw_state(crtc_state); 5149 5150 /* FIXME: before the switch to atomic started, a new pipe_config was 5151 * kzalloc'd. Code that depends on any field being zero should be 5152 * fixed, so that the crtc_state can be safely duplicated. For now, 5153 * only fields that are know to not cause problems are preserved. */ 5154 5155 saved_state->uapi = crtc_state->uapi; 5156 saved_state->scaler_state = crtc_state->scaler_state; 5157 saved_state->shared_dpll = crtc_state->shared_dpll; 5158 saved_state->dpll_hw_state = crtc_state->dpll_hw_state; 5159 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, 5160 sizeof(saved_state->icl_port_dplls)); 5161 saved_state->crc_enabled = crtc_state->crc_enabled; 5162 if (IS_G4X(dev_priv) || 5163 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5164 saved_state->wm = crtc_state->wm; 5165 5166 memcpy(crtc_state, saved_state, sizeof(*crtc_state)); 5167 kfree(saved_state); 5168 5169 intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc); 5170 5171 return 0; 5172 } 5173 5174 static int 5175 intel_modeset_pipe_config(struct intel_atomic_state *state, 5176 struct intel_crtc *crtc) 5177 { 5178 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 5179 struct intel_crtc_state *crtc_state = 5180 intel_atomic_get_new_crtc_state(state, crtc); 5181 struct drm_connector *connector; 5182 struct drm_connector_state *connector_state; 5183 int pipe_src_w, pipe_src_h; 5184 int base_bpp, ret, i; 5185 bool retry = true; 5186 5187 crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe; 5188 5189 crtc_state->framestart_delay = 1; 5190 5191 /* 5192 * Sanitize sync polarity flags based on requested ones. If neither 5193 * positive or negative polarity is requested, treat this as meaning 5194 * negative polarity. 5195 */ 5196 if (!(crtc_state->hw.adjusted_mode.flags & 5197 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 5198 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 5199 5200 if (!(crtc_state->hw.adjusted_mode.flags & 5201 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 5202 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 5203 5204 ret = compute_baseline_pipe_bpp(state, crtc); 5205 if (ret) 5206 return ret; 5207 5208 base_bpp = crtc_state->pipe_bpp; 5209 5210 /* 5211 * Determine the real pipe dimensions. Note that stereo modes can 5212 * increase the actual pipe size due to the frame doubling and 5213 * insertion of additional space for blanks between the frame. This 5214 * is stored in the crtc timings. We use the requested mode to do this 5215 * computation to clearly distinguish it from the adjusted mode, which 5216 * can be changed by the connectors in the below retry loop. 5217 */ 5218 drm_mode_get_hv_timing(&crtc_state->hw.mode, 5219 &pipe_src_w, &pipe_src_h); 5220 drm_rect_init(&crtc_state->pipe_src, 0, 0, 5221 pipe_src_w, pipe_src_h); 5222 5223 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 5224 struct intel_encoder *encoder = 5225 to_intel_encoder(connector_state->best_encoder); 5226 5227 if (connector_state->crtc != &crtc->base) 5228 continue; 5229 5230 if (!check_single_encoder_cloning(state, crtc, encoder)) { 5231 drm_dbg_kms(&i915->drm, 5232 "[ENCODER:%d:%s] rejecting invalid cloning configuration\n", 5233 encoder->base.base.id, encoder->base.name); 5234 return -EINVAL; 5235 } 5236 5237 /* 5238 * Determine output_types before calling the .compute_config() 5239 * hooks so that the hooks can use this information safely. 5240 */ 5241 if (encoder->compute_output_type) 5242 crtc_state->output_types |= 5243 BIT(encoder->compute_output_type(encoder, crtc_state, 5244 connector_state)); 5245 else 5246 crtc_state->output_types |= BIT(encoder->type); 5247 } 5248 5249 encoder_retry: 5250 /* Ensure the port clock defaults are reset when retrying. */ 5251 crtc_state->port_clock = 0; 5252 crtc_state->pixel_multiplier = 1; 5253 5254 /* Fill in default crtc timings, allow encoders to overwrite them. */ 5255 drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode, 5256 CRTC_STEREO_DOUBLE); 5257 5258 /* Pass our mode to the connectors and the CRTC to give them a chance to 5259 * adjust it according to limitations or connector properties, and also 5260 * a chance to reject the mode entirely. 5261 */ 5262 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 5263 struct intel_encoder *encoder = 5264 to_intel_encoder(connector_state->best_encoder); 5265 5266 if (connector_state->crtc != &crtc->base) 5267 continue; 5268 5269 ret = encoder->compute_config(encoder, crtc_state, 5270 connector_state); 5271 if (ret == -EDEADLK) 5272 return ret; 5273 if (ret < 0) { 5274 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n", 5275 encoder->base.base.id, encoder->base.name, ret); 5276 return ret; 5277 } 5278 } 5279 5280 /* Set default port clock if not overwritten by the encoder. Needs to be 5281 * done afterwards in case the encoder adjusts the mode. */ 5282 if (!crtc_state->port_clock) 5283 crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock 5284 * crtc_state->pixel_multiplier; 5285 5286 ret = intel_crtc_compute_config(state, crtc); 5287 if (ret == -EDEADLK) 5288 return ret; 5289 if (ret == -EAGAIN) { 5290 if (drm_WARN(&i915->drm, !retry, 5291 "[CRTC:%d:%s] loop in pipe configuration computation\n", 5292 crtc->base.base.id, crtc->base.name)) 5293 return -EINVAL; 5294 5295 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n", 5296 crtc->base.base.id, crtc->base.name); 5297 retry = false; 5298 goto encoder_retry; 5299 } 5300 if (ret < 0) { 5301 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n", 5302 crtc->base.base.id, crtc->base.name, ret); 5303 return ret; 5304 } 5305 5306 /* Dithering seems to not pass-through bits correctly when it should, so 5307 * only enable it on 6bpc panels and when its not a compliance 5308 * test requesting 6bpc video pattern. 5309 */ 5310 crtc_state->dither = (crtc_state->pipe_bpp == 6*3) && 5311 !crtc_state->dither_force_disable; 5312 drm_dbg_kms(&i915->drm, 5313 "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 5314 crtc->base.base.id, crtc->base.name, 5315 base_bpp, crtc_state->pipe_bpp, crtc_state->dither); 5316 5317 return 0; 5318 } 5319 5320 static int 5321 intel_modeset_pipe_config_late(struct intel_atomic_state *state, 5322 struct intel_crtc *crtc) 5323 { 5324 struct intel_crtc_state *crtc_state = 5325 intel_atomic_get_new_crtc_state(state, crtc); 5326 struct drm_connector_state *conn_state; 5327 struct drm_connector *connector; 5328 int i; 5329 5330 intel_bigjoiner_adjust_pipe_src(crtc_state); 5331 5332 for_each_new_connector_in_state(&state->base, connector, 5333 conn_state, i) { 5334 struct intel_encoder *encoder = 5335 to_intel_encoder(conn_state->best_encoder); 5336 int ret; 5337 5338 if (conn_state->crtc != &crtc->base || 5339 !encoder->compute_config_late) 5340 continue; 5341 5342 ret = encoder->compute_config_late(encoder, crtc_state, 5343 conn_state); 5344 if (ret) 5345 return ret; 5346 } 5347 5348 return 0; 5349 } 5350 5351 bool intel_fuzzy_clock_check(int clock1, int clock2) 5352 { 5353 int diff; 5354 5355 if (clock1 == clock2) 5356 return true; 5357 5358 if (!clock1 || !clock2) 5359 return false; 5360 5361 diff = abs(clock1 - clock2); 5362 5363 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 5364 return true; 5365 5366 return false; 5367 } 5368 5369 static bool 5370 intel_compare_m_n(unsigned int m, unsigned int n, 5371 unsigned int m2, unsigned int n2, 5372 bool exact) 5373 { 5374 if (m == m2 && n == n2) 5375 return true; 5376 5377 if (exact || !m || !n || !m2 || !n2) 5378 return false; 5379 5380 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 5381 5382 if (n > n2) { 5383 while (n > n2) { 5384 m2 <<= 1; 5385 n2 <<= 1; 5386 } 5387 } else if (n < n2) { 5388 while (n < n2) { 5389 m <<= 1; 5390 n <<= 1; 5391 } 5392 } 5393 5394 if (n != n2) 5395 return false; 5396 5397 return intel_fuzzy_clock_check(m, m2); 5398 } 5399 5400 static bool 5401 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 5402 const struct intel_link_m_n *m2_n2, 5403 bool exact) 5404 { 5405 return m_n->tu == m2_n2->tu && 5406 intel_compare_m_n(m_n->data_m, m_n->data_n, 5407 m2_n2->data_m, m2_n2->data_n, exact) && 5408 intel_compare_m_n(m_n->link_m, m_n->link_n, 5409 m2_n2->link_m, m2_n2->link_n, exact); 5410 } 5411 5412 static bool 5413 intel_compare_infoframe(const union hdmi_infoframe *a, 5414 const union hdmi_infoframe *b) 5415 { 5416 return memcmp(a, b, sizeof(*a)) == 0; 5417 } 5418 5419 static bool 5420 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, 5421 const struct drm_dp_vsc_sdp *b) 5422 { 5423 return memcmp(a, b, sizeof(*a)) == 0; 5424 } 5425 5426 static void 5427 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, 5428 bool fastset, const char *name, 5429 const union hdmi_infoframe *a, 5430 const union hdmi_infoframe *b) 5431 { 5432 if (fastset) { 5433 if (!drm_debug_enabled(DRM_UT_KMS)) 5434 return; 5435 5436 drm_dbg_kms(&dev_priv->drm, 5437 "fastset mismatch in %s infoframe\n", name); 5438 drm_dbg_kms(&dev_priv->drm, "expected:\n"); 5439 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); 5440 drm_dbg_kms(&dev_priv->drm, "found:\n"); 5441 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); 5442 } else { 5443 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name); 5444 drm_err(&dev_priv->drm, "expected:\n"); 5445 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); 5446 drm_err(&dev_priv->drm, "found:\n"); 5447 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); 5448 } 5449 } 5450 5451 static void 5452 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv, 5453 bool fastset, const char *name, 5454 const struct drm_dp_vsc_sdp *a, 5455 const struct drm_dp_vsc_sdp *b) 5456 { 5457 if (fastset) { 5458 if (!drm_debug_enabled(DRM_UT_KMS)) 5459 return; 5460 5461 drm_dbg_kms(&dev_priv->drm, 5462 "fastset mismatch in %s dp sdp\n", name); 5463 drm_dbg_kms(&dev_priv->drm, "expected:\n"); 5464 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a); 5465 drm_dbg_kms(&dev_priv->drm, "found:\n"); 5466 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b); 5467 } else { 5468 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name); 5469 drm_err(&dev_priv->drm, "expected:\n"); 5470 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a); 5471 drm_err(&dev_priv->drm, "found:\n"); 5472 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b); 5473 } 5474 } 5475 5476 static void __printf(4, 5) 5477 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, 5478 const char *name, const char *format, ...) 5479 { 5480 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 5481 struct va_format vaf; 5482 va_list args; 5483 5484 va_start(args, format); 5485 vaf.fmt = format; 5486 vaf.va = &args; 5487 5488 if (fastset) 5489 drm_dbg_kms(&i915->drm, 5490 "[CRTC:%d:%s] fastset mismatch in %s %pV\n", 5491 crtc->base.base.id, crtc->base.name, name, &vaf); 5492 else 5493 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n", 5494 crtc->base.base.id, crtc->base.name, name, &vaf); 5495 5496 va_end(args); 5497 } 5498 5499 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 5500 { 5501 if (dev_priv->params.fastboot != -1) 5502 return dev_priv->params.fastboot; 5503 5504 /* Enable fastboot by default on Skylake and newer */ 5505 if (DISPLAY_VER(dev_priv) >= 9) 5506 return true; 5507 5508 /* Enable fastboot by default on VLV and CHV */ 5509 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5510 return true; 5511 5512 /* Disabled by default on all others */ 5513 return false; 5514 } 5515 5516 bool 5517 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 5518 const struct intel_crtc_state *pipe_config, 5519 bool fastset) 5520 { 5521 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); 5522 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 5523 bool ret = true; 5524 u32 bp_gamma = 0; 5525 bool fixup_inherited = fastset && 5526 current_config->inherited && !pipe_config->inherited; 5527 5528 if (fixup_inherited && !fastboot_enabled(dev_priv)) { 5529 drm_dbg_kms(&dev_priv->drm, 5530 "initial modeset and fastboot not set\n"); 5531 ret = false; 5532 } 5533 5534 #define PIPE_CONF_CHECK_X(name) do { \ 5535 if (current_config->name != pipe_config->name) { \ 5536 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5537 "(expected 0x%08x, found 0x%08x)", \ 5538 current_config->name, \ 5539 pipe_config->name); \ 5540 ret = false; \ 5541 } \ 5542 } while (0) 5543 5544 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \ 5545 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \ 5546 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5547 "(expected 0x%08x, found 0x%08x)", \ 5548 current_config->name & (mask), \ 5549 pipe_config->name & (mask)); \ 5550 ret = false; \ 5551 } \ 5552 } while (0) 5553 5554 #define PIPE_CONF_CHECK_I(name) do { \ 5555 if (current_config->name != pipe_config->name) { \ 5556 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5557 "(expected %i, found %i)", \ 5558 current_config->name, \ 5559 pipe_config->name); \ 5560 ret = false; \ 5561 } \ 5562 } while (0) 5563 5564 #define PIPE_CONF_CHECK_BOOL(name) do { \ 5565 if (current_config->name != pipe_config->name) { \ 5566 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5567 "(expected %s, found %s)", \ 5568 str_yes_no(current_config->name), \ 5569 str_yes_no(pipe_config->name)); \ 5570 ret = false; \ 5571 } \ 5572 } while (0) 5573 5574 /* 5575 * Checks state where we only read out the enabling, but not the entire 5576 * state itself (like full infoframes or ELD for audio). These states 5577 * require a full modeset on bootup to fix up. 5578 */ 5579 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \ 5580 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ 5581 PIPE_CONF_CHECK_BOOL(name); \ 5582 } else { \ 5583 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5584 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \ 5585 str_yes_no(current_config->name), \ 5586 str_yes_no(pipe_config->name)); \ 5587 ret = false; \ 5588 } \ 5589 } while (0) 5590 5591 #define PIPE_CONF_CHECK_P(name) do { \ 5592 if (current_config->name != pipe_config->name) { \ 5593 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5594 "(expected %p, found %p)", \ 5595 current_config->name, \ 5596 pipe_config->name); \ 5597 ret = false; \ 5598 } \ 5599 } while (0) 5600 5601 #define PIPE_CONF_CHECK_M_N(name) do { \ 5602 if (!intel_compare_link_m_n(¤t_config->name, \ 5603 &pipe_config->name,\ 5604 !fastset)) { \ 5605 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5606 "(expected tu %i data %i/%i link %i/%i, " \ 5607 "found tu %i, data %i/%i link %i/%i)", \ 5608 current_config->name.tu, \ 5609 current_config->name.data_m, \ 5610 current_config->name.data_n, \ 5611 current_config->name.link_m, \ 5612 current_config->name.link_n, \ 5613 pipe_config->name.tu, \ 5614 pipe_config->name.data_m, \ 5615 pipe_config->name.data_n, \ 5616 pipe_config->name.link_m, \ 5617 pipe_config->name.link_n); \ 5618 ret = false; \ 5619 } \ 5620 } while (0) 5621 5622 #define PIPE_CONF_CHECK_TIMINGS(name) do { \ 5623 PIPE_CONF_CHECK_I(name.crtc_hdisplay); \ 5624 PIPE_CONF_CHECK_I(name.crtc_htotal); \ 5625 PIPE_CONF_CHECK_I(name.crtc_hblank_start); \ 5626 PIPE_CONF_CHECK_I(name.crtc_hblank_end); \ 5627 PIPE_CONF_CHECK_I(name.crtc_hsync_start); \ 5628 PIPE_CONF_CHECK_I(name.crtc_hsync_end); \ 5629 PIPE_CONF_CHECK_I(name.crtc_vdisplay); \ 5630 PIPE_CONF_CHECK_I(name.crtc_vtotal); \ 5631 PIPE_CONF_CHECK_I(name.crtc_vblank_start); \ 5632 PIPE_CONF_CHECK_I(name.crtc_vblank_end); \ 5633 PIPE_CONF_CHECK_I(name.crtc_vsync_start); \ 5634 PIPE_CONF_CHECK_I(name.crtc_vsync_end); \ 5635 } while (0) 5636 5637 #define PIPE_CONF_CHECK_RECT(name) do { \ 5638 PIPE_CONF_CHECK_I(name.x1); \ 5639 PIPE_CONF_CHECK_I(name.x2); \ 5640 PIPE_CONF_CHECK_I(name.y1); \ 5641 PIPE_CONF_CHECK_I(name.y2); \ 5642 } while (0) 5643 5644 /* This is required for BDW+ where there is only one set of registers for 5645 * switching between high and low RR. 5646 * This macro can be used whenever a comparison has to be made between one 5647 * hw state and multiple sw state variables. 5648 */ 5649 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \ 5650 if (!intel_compare_link_m_n(¤t_config->name, \ 5651 &pipe_config->name, !fastset) && \ 5652 !intel_compare_link_m_n(¤t_config->alt_name, \ 5653 &pipe_config->name, !fastset)) { \ 5654 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5655 "(expected tu %i data %i/%i link %i/%i, " \ 5656 "or tu %i data %i/%i link %i/%i, " \ 5657 "found tu %i, data %i/%i link %i/%i)", \ 5658 current_config->name.tu, \ 5659 current_config->name.data_m, \ 5660 current_config->name.data_n, \ 5661 current_config->name.link_m, \ 5662 current_config->name.link_n, \ 5663 current_config->alt_name.tu, \ 5664 current_config->alt_name.data_m, \ 5665 current_config->alt_name.data_n, \ 5666 current_config->alt_name.link_m, \ 5667 current_config->alt_name.link_n, \ 5668 pipe_config->name.tu, \ 5669 pipe_config->name.data_m, \ 5670 pipe_config->name.data_n, \ 5671 pipe_config->name.link_m, \ 5672 pipe_config->name.link_n); \ 5673 ret = false; \ 5674 } \ 5675 } while (0) 5676 5677 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 5678 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 5679 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5680 "(%x) (expected %i, found %i)", \ 5681 (mask), \ 5682 current_config->name & (mask), \ 5683 pipe_config->name & (mask)); \ 5684 ret = false; \ 5685 } \ 5686 } while (0) 5687 5688 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \ 5689 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 5690 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5691 "(expected %i, found %i)", \ 5692 current_config->name, \ 5693 pipe_config->name); \ 5694 ret = false; \ 5695 } \ 5696 } while (0) 5697 5698 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ 5699 if (!intel_compare_infoframe(¤t_config->infoframes.name, \ 5700 &pipe_config->infoframes.name)) { \ 5701 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \ 5702 ¤t_config->infoframes.name, \ 5703 &pipe_config->infoframes.name); \ 5704 ret = false; \ 5705 } \ 5706 } while (0) 5707 5708 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \ 5709 if (!current_config->has_psr && !pipe_config->has_psr && \ 5710 !intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \ 5711 &pipe_config->infoframes.name)) { \ 5712 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \ 5713 ¤t_config->infoframes.name, \ 5714 &pipe_config->infoframes.name); \ 5715 ret = false; \ 5716 } \ 5717 } while (0) 5718 5719 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \ 5720 if (current_config->name1 != pipe_config->name1) { \ 5721 pipe_config_mismatch(fastset, crtc, __stringify(name1), \ 5722 "(expected %i, found %i, won't compare lut values)", \ 5723 current_config->name1, \ 5724 pipe_config->name1); \ 5725 ret = false;\ 5726 } else { \ 5727 if (!intel_color_lut_equal(current_config->name2, \ 5728 pipe_config->name2, pipe_config->name1, \ 5729 bit_precision)) { \ 5730 pipe_config_mismatch(fastset, crtc, __stringify(name2), \ 5731 "hw_state doesn't match sw_state"); \ 5732 ret = false; \ 5733 } \ 5734 } \ 5735 } while (0) 5736 5737 #define PIPE_CONF_QUIRK(quirk) \ 5738 ((current_config->quirks | pipe_config->quirks) & (quirk)) 5739 5740 PIPE_CONF_CHECK_I(hw.enable); 5741 PIPE_CONF_CHECK_I(hw.active); 5742 5743 PIPE_CONF_CHECK_I(cpu_transcoder); 5744 PIPE_CONF_CHECK_I(mst_master_transcoder); 5745 5746 PIPE_CONF_CHECK_BOOL(has_pch_encoder); 5747 PIPE_CONF_CHECK_I(fdi_lanes); 5748 PIPE_CONF_CHECK_M_N(fdi_m_n); 5749 5750 PIPE_CONF_CHECK_I(lane_count); 5751 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 5752 5753 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) { 5754 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 5755 } else { 5756 PIPE_CONF_CHECK_M_N(dp_m_n); 5757 PIPE_CONF_CHECK_M_N(dp_m2_n2); 5758 } 5759 5760 PIPE_CONF_CHECK_X(output_types); 5761 5762 PIPE_CONF_CHECK_I(framestart_delay); 5763 PIPE_CONF_CHECK_I(msa_timing_delay); 5764 5765 PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode); 5766 PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode); 5767 5768 PIPE_CONF_CHECK_I(pixel_multiplier); 5769 5770 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5771 DRM_MODE_FLAG_INTERLACE); 5772 5773 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 5774 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5775 DRM_MODE_FLAG_PHSYNC); 5776 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5777 DRM_MODE_FLAG_NHSYNC); 5778 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5779 DRM_MODE_FLAG_PVSYNC); 5780 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5781 DRM_MODE_FLAG_NVSYNC); 5782 } 5783 5784 PIPE_CONF_CHECK_I(output_format); 5785 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 5786 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 5787 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5788 PIPE_CONF_CHECK_BOOL(limited_color_range); 5789 5790 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 5791 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 5792 PIPE_CONF_CHECK_BOOL(has_infoframe); 5793 PIPE_CONF_CHECK_BOOL(fec_enable); 5794 5795 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 5796 5797 PIPE_CONF_CHECK_X(gmch_pfit.control); 5798 /* pfit ratios are autocomputed by the hw on gen4+ */ 5799 if (DISPLAY_VER(dev_priv) < 4) 5800 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 5801 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 5802 5803 /* 5804 * Changing the EDP transcoder input mux 5805 * (A_ONOFF vs. A_ON) requires a full modeset. 5806 */ 5807 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 5808 5809 if (!fastset) { 5810 PIPE_CONF_CHECK_RECT(pipe_src); 5811 5812 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 5813 PIPE_CONF_CHECK_RECT(pch_pfit.dst); 5814 5815 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 5816 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 5817 5818 PIPE_CONF_CHECK_X(gamma_mode); 5819 if (IS_CHERRYVIEW(dev_priv)) 5820 PIPE_CONF_CHECK_X(cgm_mode); 5821 else 5822 PIPE_CONF_CHECK_X(csc_mode); 5823 PIPE_CONF_CHECK_BOOL(gamma_enable); 5824 PIPE_CONF_CHECK_BOOL(csc_enable); 5825 5826 PIPE_CONF_CHECK_I(linetime); 5827 PIPE_CONF_CHECK_I(ips_linetime); 5828 5829 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); 5830 if (bp_gamma) 5831 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma); 5832 5833 if (current_config->active_planes) { 5834 PIPE_CONF_CHECK_BOOL(has_psr); 5835 PIPE_CONF_CHECK_BOOL(has_psr2); 5836 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch); 5837 PIPE_CONF_CHECK_I(dc3co_exitline); 5838 } 5839 } 5840 5841 PIPE_CONF_CHECK_BOOL(double_wide); 5842 5843 if (dev_priv->dpll.mgr) { 5844 PIPE_CONF_CHECK_P(shared_dpll); 5845 5846 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 5847 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 5848 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 5849 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 5850 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 5851 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 5852 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 5853 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 5854 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 5855 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 5856 PIPE_CONF_CHECK_X(dpll_hw_state.div0); 5857 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 5858 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 5859 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 5860 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 5861 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 5862 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 5863 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 5864 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 5865 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 5866 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 5867 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 5868 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 5869 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 5870 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 5871 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 5872 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 5873 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 5874 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 5875 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 5876 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 5877 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 5878 } 5879 5880 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 5881 PIPE_CONF_CHECK_X(dsi_pll.div); 5882 5883 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) 5884 PIPE_CONF_CHECK_I(pipe_bpp); 5885 5886 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); 5887 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); 5888 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 5889 5890 PIPE_CONF_CHECK_I(min_voltage_level); 5891 5892 if (current_config->has_psr || pipe_config->has_psr) 5893 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, 5894 ~intel_hdmi_infoframe_enable(DP_SDP_VSC)); 5895 else 5896 PIPE_CONF_CHECK_X(infoframes.enable); 5897 5898 PIPE_CONF_CHECK_X(infoframes.gcp); 5899 PIPE_CONF_CHECK_INFOFRAME(avi); 5900 PIPE_CONF_CHECK_INFOFRAME(spd); 5901 PIPE_CONF_CHECK_INFOFRAME(hdmi); 5902 PIPE_CONF_CHECK_INFOFRAME(drm); 5903 PIPE_CONF_CHECK_DP_VSC_SDP(vsc); 5904 5905 PIPE_CONF_CHECK_X(sync_mode_slaves_mask); 5906 PIPE_CONF_CHECK_I(master_transcoder); 5907 PIPE_CONF_CHECK_X(bigjoiner_pipes); 5908 5909 PIPE_CONF_CHECK_I(dsc.compression_enable); 5910 PIPE_CONF_CHECK_I(dsc.dsc_split); 5911 PIPE_CONF_CHECK_I(dsc.compressed_bpp); 5912 5913 PIPE_CONF_CHECK_BOOL(splitter.enable); 5914 PIPE_CONF_CHECK_I(splitter.link_count); 5915 PIPE_CONF_CHECK_I(splitter.pixel_overlap); 5916 5917 PIPE_CONF_CHECK_BOOL(vrr.enable); 5918 PIPE_CONF_CHECK_I(vrr.vmin); 5919 PIPE_CONF_CHECK_I(vrr.vmax); 5920 PIPE_CONF_CHECK_I(vrr.flipline); 5921 PIPE_CONF_CHECK_I(vrr.pipeline_full); 5922 PIPE_CONF_CHECK_I(vrr.guardband); 5923 5924 #undef PIPE_CONF_CHECK_X 5925 #undef PIPE_CONF_CHECK_I 5926 #undef PIPE_CONF_CHECK_BOOL 5927 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 5928 #undef PIPE_CONF_CHECK_P 5929 #undef PIPE_CONF_CHECK_FLAGS 5930 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 5931 #undef PIPE_CONF_CHECK_COLOR_LUT 5932 #undef PIPE_CONF_CHECK_TIMINGS 5933 #undef PIPE_CONF_CHECK_RECT 5934 #undef PIPE_CONF_QUIRK 5935 5936 return ret; 5937 } 5938 5939 static void 5940 intel_verify_planes(struct intel_atomic_state *state) 5941 { 5942 struct intel_plane *plane; 5943 const struct intel_plane_state *plane_state; 5944 int i; 5945 5946 for_each_new_intel_plane_in_state(state, plane, 5947 plane_state, i) 5948 assert_plane(plane, plane_state->planar_slave || 5949 plane_state->uapi.visible); 5950 } 5951 5952 int intel_modeset_all_pipes(struct intel_atomic_state *state) 5953 { 5954 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5955 struct intel_crtc *crtc; 5956 5957 /* 5958 * Add all pipes to the state, and force 5959 * a modeset on all the active ones. 5960 */ 5961 for_each_intel_crtc(&dev_priv->drm, crtc) { 5962 struct intel_crtc_state *crtc_state; 5963 int ret; 5964 5965 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5966 if (IS_ERR(crtc_state)) 5967 return PTR_ERR(crtc_state); 5968 5969 if (!crtc_state->hw.active || 5970 drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) 5971 continue; 5972 5973 crtc_state->uapi.mode_changed = true; 5974 5975 ret = drm_atomic_add_affected_connectors(&state->base, 5976 &crtc->base); 5977 if (ret) 5978 return ret; 5979 5980 ret = intel_atomic_add_affected_planes(state, crtc); 5981 if (ret) 5982 return ret; 5983 5984 crtc_state->update_planes |= crtc_state->active_planes; 5985 } 5986 5987 return 0; 5988 } 5989 5990 void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) 5991 { 5992 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5993 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5994 struct drm_display_mode adjusted_mode; 5995 5996 drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode); 5997 5998 if (crtc_state->vrr.enable) { 5999 adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax; 6000 adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax; 6001 adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state); 6002 crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state); 6003 } 6004 6005 drm_calc_timestamping_constants(&crtc->base, &adjusted_mode); 6006 6007 crtc->mode_flags = crtc_state->mode_flags; 6008 6009 /* 6010 * The scanline counter increments at the leading edge of hsync. 6011 * 6012 * On most platforms it starts counting from vtotal-1 on the 6013 * first active line. That means the scanline counter value is 6014 * always one less than what we would expect. Ie. just after 6015 * start of vblank, which also occurs at start of hsync (on the 6016 * last active line), the scanline counter will read vblank_start-1. 6017 * 6018 * On gen2 the scanline counter starts counting from 1 instead 6019 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 6020 * to keep the value positive), instead of adding one. 6021 * 6022 * On HSW+ the behaviour of the scanline counter depends on the output 6023 * type. For DP ports it behaves like most other platforms, but on HDMI 6024 * there's an extra 1 line difference. So we need to add two instead of 6025 * one to the value. 6026 * 6027 * On VLV/CHV DSI the scanline counter would appear to increment 6028 * approx. 1/3 of a scanline before start of vblank. Unfortunately 6029 * that means we can't tell whether we're in vblank or not while 6030 * we're on that particular line. We must still set scanline_offset 6031 * to 1 so that the vblank timestamps come out correct when we query 6032 * the scanline counter from within the vblank interrupt handler. 6033 * However if queried just before the start of vblank we'll get an 6034 * answer that's slightly in the future. 6035 */ 6036 if (DISPLAY_VER(dev_priv) == 2) { 6037 int vtotal; 6038 6039 vtotal = adjusted_mode.crtc_vtotal; 6040 if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 6041 vtotal /= 2; 6042 6043 crtc->scanline_offset = vtotal - 1; 6044 } else if (HAS_DDI(dev_priv) && 6045 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 6046 crtc->scanline_offset = 2; 6047 } else { 6048 crtc->scanline_offset = 1; 6049 } 6050 } 6051 6052 static void intel_modeset_clear_plls(struct intel_atomic_state *state) 6053 { 6054 struct intel_crtc_state *new_crtc_state; 6055 struct intel_crtc *crtc; 6056 int i; 6057 6058 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6059 if (!intel_crtc_needs_modeset(new_crtc_state)) 6060 continue; 6061 6062 intel_release_shared_dplls(state, crtc); 6063 } 6064 } 6065 6066 /* 6067 * This implements the workaround described in the "notes" section of the mode 6068 * set sequence documentation. When going from no pipes or single pipe to 6069 * multiple pipes, and planes are enabled after the pipe, we need to wait at 6070 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 6071 */ 6072 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) 6073 { 6074 struct intel_crtc_state *crtc_state; 6075 struct intel_crtc *crtc; 6076 struct intel_crtc_state *first_crtc_state = NULL; 6077 struct intel_crtc_state *other_crtc_state = NULL; 6078 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 6079 int i; 6080 6081 /* look at all crtc's that are going to be enabled in during modeset */ 6082 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6083 if (!crtc_state->hw.active || 6084 !intel_crtc_needs_modeset(crtc_state)) 6085 continue; 6086 6087 if (first_crtc_state) { 6088 other_crtc_state = crtc_state; 6089 break; 6090 } else { 6091 first_crtc_state = crtc_state; 6092 first_pipe = crtc->pipe; 6093 } 6094 } 6095 6096 /* No workaround needed? */ 6097 if (!first_crtc_state) 6098 return 0; 6099 6100 /* w/a possibly needed, check how many crtc's are already enabled. */ 6101 for_each_intel_crtc(state->base.dev, crtc) { 6102 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6103 if (IS_ERR(crtc_state)) 6104 return PTR_ERR(crtc_state); 6105 6106 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 6107 6108 if (!crtc_state->hw.active || 6109 intel_crtc_needs_modeset(crtc_state)) 6110 continue; 6111 6112 /* 2 or more enabled crtcs means no need for w/a */ 6113 if (enabled_pipe != INVALID_PIPE) 6114 return 0; 6115 6116 enabled_pipe = crtc->pipe; 6117 } 6118 6119 if (enabled_pipe != INVALID_PIPE) 6120 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 6121 else if (other_crtc_state) 6122 other_crtc_state->hsw_workaround_pipe = first_pipe; 6123 6124 return 0; 6125 } 6126 6127 u8 intel_calc_active_pipes(struct intel_atomic_state *state, 6128 u8 active_pipes) 6129 { 6130 const struct intel_crtc_state *crtc_state; 6131 struct intel_crtc *crtc; 6132 int i; 6133 6134 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6135 if (crtc_state->hw.active) 6136 active_pipes |= BIT(crtc->pipe); 6137 else 6138 active_pipes &= ~BIT(crtc->pipe); 6139 } 6140 6141 return active_pipes; 6142 } 6143 6144 static int intel_modeset_checks(struct intel_atomic_state *state) 6145 { 6146 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6147 6148 state->modeset = true; 6149 6150 if (IS_HASWELL(dev_priv)) 6151 return hsw_mode_set_planes_workaround(state); 6152 6153 return 0; 6154 } 6155 6156 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 6157 struct intel_crtc_state *new_crtc_state) 6158 { 6159 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 6160 return; 6161 6162 new_crtc_state->uapi.mode_changed = false; 6163 new_crtc_state->update_pipe = true; 6164 } 6165 6166 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state, 6167 struct intel_crtc_state *new_crtc_state) 6168 { 6169 /* 6170 * If we're not doing the full modeset we want to 6171 * keep the current M/N values as they may be 6172 * sufficiently different to the computed values 6173 * to cause problems. 6174 * 6175 * FIXME: should really copy more fuzzy state here 6176 */ 6177 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; 6178 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; 6179 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; 6180 new_crtc_state->has_drrs = old_crtc_state->has_drrs; 6181 } 6182 6183 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 6184 struct intel_crtc *crtc, 6185 u8 plane_ids_mask) 6186 { 6187 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6188 struct intel_plane *plane; 6189 6190 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 6191 struct intel_plane_state *plane_state; 6192 6193 if ((plane_ids_mask & BIT(plane->id)) == 0) 6194 continue; 6195 6196 plane_state = intel_atomic_get_plane_state(state, plane); 6197 if (IS_ERR(plane_state)) 6198 return PTR_ERR(plane_state); 6199 } 6200 6201 return 0; 6202 } 6203 6204 int intel_atomic_add_affected_planes(struct intel_atomic_state *state, 6205 struct intel_crtc *crtc) 6206 { 6207 const struct intel_crtc_state *old_crtc_state = 6208 intel_atomic_get_old_crtc_state(state, crtc); 6209 const struct intel_crtc_state *new_crtc_state = 6210 intel_atomic_get_new_crtc_state(state, crtc); 6211 6212 return intel_crtc_add_planes_to_state(state, crtc, 6213 old_crtc_state->enabled_planes | 6214 new_crtc_state->enabled_planes); 6215 } 6216 6217 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 6218 { 6219 /* See {hsw,vlv,ivb}_plane_ratio() */ 6220 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 6221 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 6222 IS_IVYBRIDGE(dev_priv); 6223 } 6224 6225 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state, 6226 struct intel_crtc *crtc, 6227 struct intel_crtc *other) 6228 { 6229 const struct intel_plane_state *plane_state; 6230 struct intel_plane *plane; 6231 u8 plane_ids = 0; 6232 int i; 6233 6234 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 6235 if (plane->pipe == crtc->pipe) 6236 plane_ids |= BIT(plane->id); 6237 } 6238 6239 return intel_crtc_add_planes_to_state(state, other, plane_ids); 6240 } 6241 6242 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state) 6243 { 6244 struct drm_i915_private *i915 = to_i915(state->base.dev); 6245 const struct intel_crtc_state *crtc_state; 6246 struct intel_crtc *crtc; 6247 int i; 6248 6249 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6250 struct intel_crtc *other; 6251 6252 for_each_intel_crtc_in_pipe_mask(&i915->drm, other, 6253 crtc_state->bigjoiner_pipes) { 6254 int ret; 6255 6256 if (crtc == other) 6257 continue; 6258 6259 ret = intel_crtc_add_bigjoiner_planes(state, crtc, other); 6260 if (ret) 6261 return ret; 6262 } 6263 } 6264 6265 return 0; 6266 } 6267 6268 static int intel_atomic_check_planes(struct intel_atomic_state *state) 6269 { 6270 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6271 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6272 struct intel_plane_state *plane_state; 6273 struct intel_plane *plane; 6274 struct intel_crtc *crtc; 6275 int i, ret; 6276 6277 ret = icl_add_linked_planes(state); 6278 if (ret) 6279 return ret; 6280 6281 ret = intel_bigjoiner_add_affected_planes(state); 6282 if (ret) 6283 return ret; 6284 6285 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 6286 ret = intel_plane_atomic_check(state, plane); 6287 if (ret) { 6288 drm_dbg_atomic(&dev_priv->drm, 6289 "[PLANE:%d:%s] atomic driver check failed\n", 6290 plane->base.base.id, plane->base.name); 6291 return ret; 6292 } 6293 } 6294 6295 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6296 new_crtc_state, i) { 6297 u8 old_active_planes, new_active_planes; 6298 6299 ret = icl_check_nv12_planes(new_crtc_state); 6300 if (ret) 6301 return ret; 6302 6303 /* 6304 * On some platforms the number of active planes affects 6305 * the planes' minimum cdclk calculation. Add such planes 6306 * to the state before we compute the minimum cdclk. 6307 */ 6308 if (!active_planes_affects_min_cdclk(dev_priv)) 6309 continue; 6310 6311 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 6312 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 6313 6314 if (hweight8(old_active_planes) == hweight8(new_active_planes)) 6315 continue; 6316 6317 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 6318 if (ret) 6319 return ret; 6320 } 6321 6322 return 0; 6323 } 6324 6325 static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 6326 { 6327 struct intel_crtc_state *crtc_state; 6328 struct intel_crtc *crtc; 6329 int i; 6330 6331 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6332 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 6333 int ret; 6334 6335 ret = intel_crtc_atomic_check(state, crtc); 6336 if (ret) { 6337 drm_dbg_atomic(&i915->drm, 6338 "[CRTC:%d:%s] atomic driver check failed\n", 6339 crtc->base.base.id, crtc->base.name); 6340 return ret; 6341 } 6342 } 6343 6344 return 0; 6345 } 6346 6347 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, 6348 u8 transcoders) 6349 { 6350 const struct intel_crtc_state *new_crtc_state; 6351 struct intel_crtc *crtc; 6352 int i; 6353 6354 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6355 if (new_crtc_state->hw.enable && 6356 transcoders & BIT(new_crtc_state->cpu_transcoder) && 6357 intel_crtc_needs_modeset(new_crtc_state)) 6358 return true; 6359 } 6360 6361 return false; 6362 } 6363 6364 static bool intel_pipes_need_modeset(struct intel_atomic_state *state, 6365 u8 pipes) 6366 { 6367 const struct intel_crtc_state *new_crtc_state; 6368 struct intel_crtc *crtc; 6369 int i; 6370 6371 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6372 if (new_crtc_state->hw.enable && 6373 pipes & BIT(crtc->pipe) && 6374 intel_crtc_needs_modeset(new_crtc_state)) 6375 return true; 6376 } 6377 6378 return false; 6379 } 6380 6381 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, 6382 struct intel_crtc *master_crtc) 6383 { 6384 struct drm_i915_private *i915 = to_i915(state->base.dev); 6385 struct intel_crtc_state *master_crtc_state = 6386 intel_atomic_get_new_crtc_state(state, master_crtc); 6387 struct intel_crtc *slave_crtc; 6388 6389 if (!master_crtc_state->bigjoiner_pipes) 6390 return 0; 6391 6392 /* sanity check */ 6393 if (drm_WARN_ON(&i915->drm, 6394 master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state))) 6395 return -EINVAL; 6396 6397 if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) { 6398 drm_dbg_kms(&i915->drm, 6399 "[CRTC:%d:%s] Cannot act as big joiner master " 6400 "(need 0x%x as pipes, only 0x%x possible)\n", 6401 master_crtc->base.base.id, master_crtc->base.name, 6402 master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915)); 6403 return -EINVAL; 6404 } 6405 6406 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, 6407 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) { 6408 struct intel_crtc_state *slave_crtc_state; 6409 int ret; 6410 6411 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc); 6412 if (IS_ERR(slave_crtc_state)) 6413 return PTR_ERR(slave_crtc_state); 6414 6415 /* master being enabled, slave was already configured? */ 6416 if (slave_crtc_state->uapi.enable) { 6417 drm_dbg_kms(&i915->drm, 6418 "[CRTC:%d:%s] Slave is enabled as normal CRTC, but " 6419 "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n", 6420 slave_crtc->base.base.id, slave_crtc->base.name, 6421 master_crtc->base.base.id, master_crtc->base.name); 6422 return -EINVAL; 6423 } 6424 6425 /* 6426 * The state copy logic assumes the master crtc gets processed 6427 * before the slave crtc during the main compute_config loop. 6428 * This works because the crtcs are created in pipe order, 6429 * and the hardware requires master pipe < slave pipe as well. 6430 * Should that change we need to rethink the logic. 6431 */ 6432 if (WARN_ON(drm_crtc_index(&master_crtc->base) > 6433 drm_crtc_index(&slave_crtc->base))) 6434 return -EINVAL; 6435 6436 drm_dbg_kms(&i915->drm, 6437 "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n", 6438 slave_crtc->base.base.id, slave_crtc->base.name, 6439 master_crtc->base.base.id, master_crtc->base.name); 6440 6441 slave_crtc_state->bigjoiner_pipes = 6442 master_crtc_state->bigjoiner_pipes; 6443 6444 ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc); 6445 if (ret) 6446 return ret; 6447 } 6448 6449 return 0; 6450 } 6451 6452 static void kill_bigjoiner_slave(struct intel_atomic_state *state, 6453 struct intel_crtc *master_crtc) 6454 { 6455 struct drm_i915_private *i915 = to_i915(state->base.dev); 6456 struct intel_crtc_state *master_crtc_state = 6457 intel_atomic_get_new_crtc_state(state, master_crtc); 6458 struct intel_crtc *slave_crtc; 6459 6460 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, 6461 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) { 6462 struct intel_crtc_state *slave_crtc_state = 6463 intel_atomic_get_new_crtc_state(state, slave_crtc); 6464 6465 slave_crtc_state->bigjoiner_pipes = 0; 6466 6467 intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc); 6468 } 6469 6470 master_crtc_state->bigjoiner_pipes = 0; 6471 } 6472 6473 /** 6474 * DOC: asynchronous flip implementation 6475 * 6476 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC 6477 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL. 6478 * Correspondingly, support is currently added for primary plane only. 6479 * 6480 * Async flip can only change the plane surface address, so anything else 6481 * changing is rejected from the intel_async_flip_check_hw() function. 6482 * Once this check is cleared, flip done interrupt is enabled using 6483 * the intel_crtc_enable_flip_done() function. 6484 * 6485 * As soon as the surface address register is written, flip done interrupt is 6486 * generated and the requested events are sent to the usersapce in the interrupt 6487 * handler itself. The timestamp and sequence sent during the flip done event 6488 * correspond to the last vblank and have no relation to the actual time when 6489 * the flip done event was sent. 6490 */ 6491 static int intel_async_flip_check_uapi(struct intel_atomic_state *state, 6492 struct intel_crtc *crtc) 6493 { 6494 struct drm_i915_private *i915 = to_i915(state->base.dev); 6495 const struct intel_crtc_state *new_crtc_state = 6496 intel_atomic_get_new_crtc_state(state, crtc); 6497 const struct intel_plane_state *old_plane_state; 6498 struct intel_plane_state *new_plane_state; 6499 struct intel_plane *plane; 6500 int i; 6501 6502 if (!new_crtc_state->uapi.async_flip) 6503 return 0; 6504 6505 if (!new_crtc_state->uapi.active) { 6506 drm_dbg_kms(&i915->drm, 6507 "[CRTC:%d:%s] not active\n", 6508 crtc->base.base.id, crtc->base.name); 6509 return -EINVAL; 6510 } 6511 6512 if (intel_crtc_needs_modeset(new_crtc_state)) { 6513 drm_dbg_kms(&i915->drm, 6514 "[CRTC:%d:%s] modeset required\n", 6515 crtc->base.base.id, crtc->base.name); 6516 return -EINVAL; 6517 } 6518 6519 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 6520 new_plane_state, i) { 6521 if (plane->pipe != crtc->pipe) 6522 continue; 6523 6524 /* 6525 * TODO: Async flip is only supported through the page flip IOCTL 6526 * as of now. So support currently added for primary plane only. 6527 * Support for other planes on platforms on which supports 6528 * this(vlv/chv and icl+) should be added when async flip is 6529 * enabled in the atomic IOCTL path. 6530 */ 6531 if (!plane->async_flip) { 6532 drm_dbg_kms(&i915->drm, 6533 "[PLANE:%d:%s] async flip not supported\n", 6534 plane->base.base.id, plane->base.name); 6535 return -EINVAL; 6536 } 6537 6538 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) { 6539 drm_dbg_kms(&i915->drm, 6540 "[PLANE:%d:%s] no old or new framebuffer\n", 6541 plane->base.base.id, plane->base.name); 6542 return -EINVAL; 6543 } 6544 } 6545 6546 return 0; 6547 } 6548 6549 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc) 6550 { 6551 struct drm_i915_private *i915 = to_i915(state->base.dev); 6552 const struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6553 const struct intel_plane_state *new_plane_state, *old_plane_state; 6554 struct intel_plane *plane; 6555 int i; 6556 6557 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 6558 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6559 6560 if (!new_crtc_state->uapi.async_flip) 6561 return 0; 6562 6563 if (!new_crtc_state->hw.active) { 6564 drm_dbg_kms(&i915->drm, 6565 "[CRTC:%d:%s] not active\n", 6566 crtc->base.base.id, crtc->base.name); 6567 return -EINVAL; 6568 } 6569 6570 if (intel_crtc_needs_modeset(new_crtc_state)) { 6571 drm_dbg_kms(&i915->drm, 6572 "[CRTC:%d:%s] modeset required\n", 6573 crtc->base.base.id, crtc->base.name); 6574 return -EINVAL; 6575 } 6576 6577 if (old_crtc_state->active_planes != new_crtc_state->active_planes) { 6578 drm_dbg_kms(&i915->drm, 6579 "[CRTC:%d:%s] Active planes cannot be in async flip\n", 6580 crtc->base.base.id, crtc->base.name); 6581 return -EINVAL; 6582 } 6583 6584 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 6585 new_plane_state, i) { 6586 if (plane->pipe != crtc->pipe) 6587 continue; 6588 6589 /* 6590 * Only async flip capable planes should be in the state 6591 * if we're really about to ask the hardware to perform 6592 * an async flip. We should never get this far otherwise. 6593 */ 6594 if (drm_WARN_ON(&i915->drm, 6595 new_crtc_state->do_async_flip && !plane->async_flip)) 6596 return -EINVAL; 6597 6598 /* 6599 * Only check async flip capable planes other planes 6600 * may be involved in the initial commit due to 6601 * the wm0/ddb optimization. 6602 * 6603 * TODO maybe should track which planes actually 6604 * were requested to do the async flip... 6605 */ 6606 if (!plane->async_flip) 6607 continue; 6608 6609 /* 6610 * FIXME: This check is kept generic for all platforms. 6611 * Need to verify this for all gen9 platforms to enable 6612 * this selectively if required. 6613 */ 6614 switch (new_plane_state->hw.fb->modifier) { 6615 case I915_FORMAT_MOD_X_TILED: 6616 case I915_FORMAT_MOD_Y_TILED: 6617 case I915_FORMAT_MOD_Yf_TILED: 6618 case I915_FORMAT_MOD_4_TILED: 6619 break; 6620 default: 6621 drm_dbg_kms(&i915->drm, 6622 "[PLANE:%d:%s] Modifier does not support async flips\n", 6623 plane->base.base.id, plane->base.name); 6624 return -EINVAL; 6625 } 6626 6627 if (new_plane_state->hw.fb->format->num_planes > 1) { 6628 drm_dbg_kms(&i915->drm, 6629 "[PLANE:%d:%s] Planar formats do not support async flips\n", 6630 plane->base.base.id, plane->base.name); 6631 return -EINVAL; 6632 } 6633 6634 if (old_plane_state->view.color_plane[0].mapping_stride != 6635 new_plane_state->view.color_plane[0].mapping_stride) { 6636 drm_dbg_kms(&i915->drm, 6637 "[PLANE:%d:%s] Stride cannot be changed in async flip\n", 6638 plane->base.base.id, plane->base.name); 6639 return -EINVAL; 6640 } 6641 6642 if (old_plane_state->hw.fb->modifier != 6643 new_plane_state->hw.fb->modifier) { 6644 drm_dbg_kms(&i915->drm, 6645 "[PLANE:%d:%s] Modifier cannot be changed in async flip\n", 6646 plane->base.base.id, plane->base.name); 6647 return -EINVAL; 6648 } 6649 6650 if (old_plane_state->hw.fb->format != 6651 new_plane_state->hw.fb->format) { 6652 drm_dbg_kms(&i915->drm, 6653 "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n", 6654 plane->base.base.id, plane->base.name); 6655 return -EINVAL; 6656 } 6657 6658 if (old_plane_state->hw.rotation != 6659 new_plane_state->hw.rotation) { 6660 drm_dbg_kms(&i915->drm, 6661 "[PLANE:%d:%s] Rotation cannot be changed in async flip\n", 6662 plane->base.base.id, plane->base.name); 6663 return -EINVAL; 6664 } 6665 6666 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) || 6667 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) { 6668 drm_dbg_kms(&i915->drm, 6669 "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n", 6670 plane->base.base.id, plane->base.name); 6671 return -EINVAL; 6672 } 6673 6674 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) { 6675 drm_dbg_kms(&i915->drm, 6676 "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n", 6677 plane->base.base.id, plane->base.name); 6678 return -EINVAL; 6679 } 6680 6681 if (old_plane_state->hw.pixel_blend_mode != 6682 new_plane_state->hw.pixel_blend_mode) { 6683 drm_dbg_kms(&i915->drm, 6684 "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n", 6685 plane->base.base.id, plane->base.name); 6686 return -EINVAL; 6687 } 6688 6689 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) { 6690 drm_dbg_kms(&i915->drm, 6691 "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n", 6692 plane->base.base.id, plane->base.name); 6693 return -EINVAL; 6694 } 6695 6696 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) { 6697 drm_dbg_kms(&i915->drm, 6698 "[PLANE:%d:%s] Color range cannot be changed in async flip\n", 6699 plane->base.base.id, plane->base.name); 6700 return -EINVAL; 6701 } 6702 6703 /* plane decryption is allow to change only in synchronous flips */ 6704 if (old_plane_state->decrypt != new_plane_state->decrypt) { 6705 drm_dbg_kms(&i915->drm, 6706 "[PLANE:%d:%s] Decryption cannot be changed in async flip\n", 6707 plane->base.base.id, plane->base.name); 6708 return -EINVAL; 6709 } 6710 } 6711 6712 return 0; 6713 } 6714 6715 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) 6716 { 6717 struct drm_i915_private *i915 = to_i915(state->base.dev); 6718 struct intel_crtc_state *crtc_state; 6719 struct intel_crtc *crtc; 6720 u8 affected_pipes = 0; 6721 u8 modeset_pipes = 0; 6722 int i; 6723 6724 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6725 affected_pipes |= crtc_state->bigjoiner_pipes; 6726 if (intel_crtc_needs_modeset(crtc_state)) 6727 modeset_pipes |= crtc_state->bigjoiner_pipes; 6728 } 6729 6730 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) { 6731 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6732 if (IS_ERR(crtc_state)) 6733 return PTR_ERR(crtc_state); 6734 } 6735 6736 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) { 6737 int ret; 6738 6739 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6740 6741 crtc_state->uapi.mode_changed = true; 6742 6743 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 6744 if (ret) 6745 return ret; 6746 6747 ret = intel_atomic_add_affected_planes(state, crtc); 6748 if (ret) 6749 return ret; 6750 } 6751 6752 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6753 /* Kill old bigjoiner link, we may re-establish afterwards */ 6754 if (intel_crtc_needs_modeset(crtc_state) && 6755 intel_crtc_is_bigjoiner_master(crtc_state)) 6756 kill_bigjoiner_slave(state, crtc); 6757 } 6758 6759 return 0; 6760 } 6761 6762 /** 6763 * intel_atomic_check - validate state object 6764 * @dev: drm device 6765 * @_state: state to validate 6766 */ 6767 static int intel_atomic_check(struct drm_device *dev, 6768 struct drm_atomic_state *_state) 6769 { 6770 struct drm_i915_private *dev_priv = to_i915(dev); 6771 struct intel_atomic_state *state = to_intel_atomic_state(_state); 6772 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6773 struct intel_crtc *crtc; 6774 int ret, i; 6775 bool any_ms = false; 6776 6777 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6778 new_crtc_state, i) { 6779 if (new_crtc_state->inherited != old_crtc_state->inherited) 6780 new_crtc_state->uapi.mode_changed = true; 6781 6782 if (new_crtc_state->uapi.scaling_filter != 6783 old_crtc_state->uapi.scaling_filter) 6784 new_crtc_state->uapi.mode_changed = true; 6785 } 6786 6787 intel_vrr_check_modeset(state); 6788 6789 ret = drm_atomic_helper_check_modeset(dev, &state->base); 6790 if (ret) 6791 goto fail; 6792 6793 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6794 ret = intel_async_flip_check_uapi(state, crtc); 6795 if (ret) 6796 return ret; 6797 } 6798 6799 ret = intel_bigjoiner_add_affected_crtcs(state); 6800 if (ret) 6801 goto fail; 6802 6803 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6804 new_crtc_state, i) { 6805 if (!intel_crtc_needs_modeset(new_crtc_state)) { 6806 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) 6807 copy_bigjoiner_crtc_state_nomodeset(state, crtc); 6808 else 6809 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 6810 continue; 6811 } 6812 6813 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) { 6814 drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable); 6815 continue; 6816 } 6817 6818 ret = intel_crtc_prepare_cleared_state(state, crtc); 6819 if (ret) 6820 goto fail; 6821 6822 if (!new_crtc_state->hw.enable) 6823 continue; 6824 6825 ret = intel_modeset_pipe_config(state, crtc); 6826 if (ret) 6827 goto fail; 6828 6829 ret = intel_atomic_check_bigjoiner(state, crtc); 6830 if (ret) 6831 goto fail; 6832 } 6833 6834 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6835 new_crtc_state, i) { 6836 if (!intel_crtc_needs_modeset(new_crtc_state)) 6837 continue; 6838 6839 ret = intel_modeset_pipe_config_late(state, crtc); 6840 if (ret) 6841 goto fail; 6842 6843 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 6844 } 6845 6846 /** 6847 * Check if fastset is allowed by external dependencies like other 6848 * pipes and transcoders. 6849 * 6850 * Right now it only forces a fullmodeset when the MST master 6851 * transcoder did not changed but the pipe of the master transcoder 6852 * needs a fullmodeset so all slaves also needs to do a fullmodeset or 6853 * in case of port synced crtcs, if one of the synced crtcs 6854 * needs a full modeset, all other synced crtcs should be 6855 * forced a full modeset. 6856 */ 6857 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6858 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) 6859 continue; 6860 6861 if (intel_dp_mst_is_slave_trans(new_crtc_state)) { 6862 enum transcoder master = new_crtc_state->mst_master_transcoder; 6863 6864 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) { 6865 new_crtc_state->uapi.mode_changed = true; 6866 new_crtc_state->update_pipe = false; 6867 } 6868 } 6869 6870 if (is_trans_port_sync_mode(new_crtc_state)) { 6871 u8 trans = new_crtc_state->sync_mode_slaves_mask; 6872 6873 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) 6874 trans |= BIT(new_crtc_state->master_transcoder); 6875 6876 if (intel_cpu_transcoders_need_modeset(state, trans)) { 6877 new_crtc_state->uapi.mode_changed = true; 6878 new_crtc_state->update_pipe = false; 6879 } 6880 } 6881 6882 if (new_crtc_state->bigjoiner_pipes) { 6883 if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) { 6884 new_crtc_state->uapi.mode_changed = true; 6885 new_crtc_state->update_pipe = false; 6886 } 6887 } 6888 } 6889 6890 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6891 new_crtc_state, i) { 6892 if (intel_crtc_needs_modeset(new_crtc_state)) { 6893 any_ms = true; 6894 continue; 6895 } 6896 6897 if (!new_crtc_state->update_pipe) 6898 continue; 6899 6900 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state); 6901 } 6902 6903 if (any_ms && !check_digital_port_conflicts(state)) { 6904 drm_dbg_kms(&dev_priv->drm, 6905 "rejecting conflicting digital port configuration\n"); 6906 ret = -EINVAL; 6907 goto fail; 6908 } 6909 6910 ret = drm_dp_mst_atomic_check(&state->base); 6911 if (ret) 6912 goto fail; 6913 6914 ret = intel_atomic_check_planes(state); 6915 if (ret) 6916 goto fail; 6917 6918 ret = intel_compute_global_watermarks(state); 6919 if (ret) 6920 goto fail; 6921 6922 ret = intel_bw_atomic_check(state); 6923 if (ret) 6924 goto fail; 6925 6926 ret = intel_cdclk_atomic_check(state, &any_ms); 6927 if (ret) 6928 goto fail; 6929 6930 if (intel_any_crtc_needs_modeset(state)) 6931 any_ms = true; 6932 6933 if (any_ms) { 6934 ret = intel_modeset_checks(state); 6935 if (ret) 6936 goto fail; 6937 6938 ret = intel_modeset_calc_cdclk(state); 6939 if (ret) 6940 return ret; 6941 6942 intel_modeset_clear_plls(state); 6943 } 6944 6945 ret = intel_atomic_check_crtcs(state); 6946 if (ret) 6947 goto fail; 6948 6949 ret = intel_fbc_atomic_check(state); 6950 if (ret) 6951 goto fail; 6952 6953 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6954 new_crtc_state, i) { 6955 ret = intel_async_flip_check_hw(state, crtc); 6956 if (ret) 6957 goto fail; 6958 6959 if (!intel_crtc_needs_modeset(new_crtc_state) && 6960 !new_crtc_state->update_pipe) 6961 continue; 6962 6963 intel_crtc_state_dump(new_crtc_state, state, 6964 intel_crtc_needs_modeset(new_crtc_state) ? 6965 "modeset" : "fastset"); 6966 } 6967 6968 return 0; 6969 6970 fail: 6971 if (ret == -EDEADLK) 6972 return ret; 6973 6974 /* 6975 * FIXME would probably be nice to know which crtc specifically 6976 * caused the failure, in cases where we can pinpoint it. 6977 */ 6978 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6979 new_crtc_state, i) 6980 intel_crtc_state_dump(new_crtc_state, state, "failed"); 6981 6982 return ret; 6983 } 6984 6985 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 6986 { 6987 struct intel_crtc_state *crtc_state; 6988 struct intel_crtc *crtc; 6989 int i, ret; 6990 6991 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); 6992 if (ret < 0) 6993 return ret; 6994 6995 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6996 bool mode_changed = intel_crtc_needs_modeset(crtc_state); 6997 6998 if (mode_changed || crtc_state->update_pipe || 6999 crtc_state->uapi.color_mgmt_changed) { 7000 intel_dsb_prepare(crtc_state); 7001 } 7002 } 7003 7004 return 0; 7005 } 7006 7007 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 7008 struct intel_crtc_state *crtc_state) 7009 { 7010 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7011 7012 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes) 7013 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 7014 7015 if (crtc_state->has_pch_encoder) { 7016 enum pipe pch_transcoder = 7017 intel_crtc_pch_transcoder(crtc); 7018 7019 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 7020 } 7021 } 7022 7023 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 7024 const struct intel_crtc_state *new_crtc_state) 7025 { 7026 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 7027 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7028 7029 /* 7030 * Update pipe size and adjust fitter if needed: the reason for this is 7031 * that in compute_mode_changes we check the native mode (not the pfit 7032 * mode) to see if we can flip rather than do a full mode set. In the 7033 * fastboot case, we'll flip, but if we don't update the pipesrc and 7034 * pfit state, we'll end up with a big fb scanned out into the wrong 7035 * sized surface. 7036 */ 7037 intel_set_pipe_src_size(new_crtc_state); 7038 7039 /* on skylake this is done by detaching scalers */ 7040 if (DISPLAY_VER(dev_priv) >= 9) { 7041 if (new_crtc_state->pch_pfit.enabled) 7042 skl_pfit_enable(new_crtc_state); 7043 } else if (HAS_PCH_SPLIT(dev_priv)) { 7044 if (new_crtc_state->pch_pfit.enabled) 7045 ilk_pfit_enable(new_crtc_state); 7046 else if (old_crtc_state->pch_pfit.enabled) 7047 ilk_pfit_disable(old_crtc_state); 7048 } 7049 7050 /* 7051 * The register is supposedly single buffered so perhaps 7052 * not 100% correct to do this here. But SKL+ calculate 7053 * this based on the adjust pixel rate so pfit changes do 7054 * affect it and so it must be updated for fastsets. 7055 * HSW/BDW only really need this here for fastboot, after 7056 * that the value should not change without a full modeset. 7057 */ 7058 if (DISPLAY_VER(dev_priv) >= 9 || 7059 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 7060 hsw_set_linetime_wm(new_crtc_state); 7061 } 7062 7063 static void commit_pipe_pre_planes(struct intel_atomic_state *state, 7064 struct intel_crtc *crtc) 7065 { 7066 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7067 const struct intel_crtc_state *old_crtc_state = 7068 intel_atomic_get_old_crtc_state(state, crtc); 7069 const struct intel_crtc_state *new_crtc_state = 7070 intel_atomic_get_new_crtc_state(state, crtc); 7071 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 7072 7073 /* 7074 * During modesets pipe configuration was programmed as the 7075 * CRTC was enabled. 7076 */ 7077 if (!modeset) { 7078 if (new_crtc_state->uapi.color_mgmt_changed || 7079 new_crtc_state->update_pipe) 7080 intel_color_commit_arm(new_crtc_state); 7081 7082 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 7083 bdw_set_pipemisc(new_crtc_state); 7084 7085 if (new_crtc_state->update_pipe) 7086 intel_pipe_fastset(old_crtc_state, new_crtc_state); 7087 } 7088 7089 intel_psr2_program_trans_man_trk_ctl(new_crtc_state); 7090 7091 intel_atomic_update_watermarks(state, crtc); 7092 } 7093 7094 static void commit_pipe_post_planes(struct intel_atomic_state *state, 7095 struct intel_crtc *crtc) 7096 { 7097 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7098 const struct intel_crtc_state *new_crtc_state = 7099 intel_atomic_get_new_crtc_state(state, crtc); 7100 7101 /* 7102 * Disable the scaler(s) after the plane(s) so that we don't 7103 * get a catastrophic underrun even if the two operations 7104 * end up happening in two different frames. 7105 */ 7106 if (DISPLAY_VER(dev_priv) >= 9 && 7107 !intel_crtc_needs_modeset(new_crtc_state)) 7108 skl_detach_scalers(new_crtc_state); 7109 } 7110 7111 static void intel_enable_crtc(struct intel_atomic_state *state, 7112 struct intel_crtc *crtc) 7113 { 7114 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7115 const struct intel_crtc_state *new_crtc_state = 7116 intel_atomic_get_new_crtc_state(state, crtc); 7117 7118 if (!intel_crtc_needs_modeset(new_crtc_state)) 7119 return; 7120 7121 intel_crtc_update_active_timings(new_crtc_state); 7122 7123 dev_priv->display->crtc_enable(state, crtc); 7124 7125 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) 7126 return; 7127 7128 /* vblanks work again, re-enable pipe CRC. */ 7129 intel_crtc_enable_pipe_crc(crtc); 7130 } 7131 7132 static void intel_update_crtc(struct intel_atomic_state *state, 7133 struct intel_crtc *crtc) 7134 { 7135 struct drm_i915_private *i915 = to_i915(state->base.dev); 7136 const struct intel_crtc_state *old_crtc_state = 7137 intel_atomic_get_old_crtc_state(state, crtc); 7138 struct intel_crtc_state *new_crtc_state = 7139 intel_atomic_get_new_crtc_state(state, crtc); 7140 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 7141 7142 if (!modeset) { 7143 if (new_crtc_state->preload_luts && 7144 (new_crtc_state->uapi.color_mgmt_changed || 7145 new_crtc_state->update_pipe)) 7146 intel_color_load_luts(new_crtc_state); 7147 7148 intel_pre_plane_update(state, crtc); 7149 7150 if (new_crtc_state->update_pipe) 7151 intel_encoders_update_pipe(state, crtc); 7152 7153 if (DISPLAY_VER(i915) >= 11 && 7154 new_crtc_state->update_pipe) 7155 icl_set_pipe_chicken(new_crtc_state); 7156 } 7157 7158 intel_fbc_update(state, crtc); 7159 7160 if (!modeset && 7161 (new_crtc_state->uapi.color_mgmt_changed || 7162 new_crtc_state->update_pipe)) 7163 intel_color_commit_noarm(new_crtc_state); 7164 7165 intel_crtc_planes_update_noarm(state, crtc); 7166 7167 /* Perform vblank evasion around commit operation */ 7168 intel_pipe_update_start(new_crtc_state); 7169 7170 commit_pipe_pre_planes(state, crtc); 7171 7172 intel_crtc_planes_update_arm(state, crtc); 7173 7174 commit_pipe_post_planes(state, crtc); 7175 7176 intel_pipe_update_end(new_crtc_state); 7177 7178 /* 7179 * We usually enable FIFO underrun interrupts as part of the 7180 * CRTC enable sequence during modesets. But when we inherit a 7181 * valid pipe configuration from the BIOS we need to take care 7182 * of enabling them on the CRTC's first fastset. 7183 */ 7184 if (new_crtc_state->update_pipe && !modeset && 7185 old_crtc_state->inherited) 7186 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 7187 } 7188 7189 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 7190 struct intel_crtc_state *old_crtc_state, 7191 struct intel_crtc_state *new_crtc_state, 7192 struct intel_crtc *crtc) 7193 { 7194 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7195 7196 /* 7197 * We need to disable pipe CRC before disabling the pipe, 7198 * or we race against vblank off. 7199 */ 7200 intel_crtc_disable_pipe_crc(crtc); 7201 7202 dev_priv->display->crtc_disable(state, crtc); 7203 crtc->active = false; 7204 intel_fbc_disable(crtc); 7205 intel_disable_shared_dpll(old_crtc_state); 7206 7207 /* FIXME unify this for all platforms */ 7208 if (!new_crtc_state->hw.active && 7209 !HAS_GMCH(dev_priv)) 7210 intel_initial_watermarks(state, crtc); 7211 } 7212 7213 static void intel_commit_modeset_disables(struct intel_atomic_state *state) 7214 { 7215 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 7216 struct intel_crtc *crtc; 7217 u32 handled = 0; 7218 int i; 7219 7220 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7221 new_crtc_state, i) { 7222 if (!intel_crtc_needs_modeset(new_crtc_state)) 7223 continue; 7224 7225 if (!old_crtc_state->hw.active) 7226 continue; 7227 7228 intel_pre_plane_update(state, crtc); 7229 intel_crtc_disable_planes(state, crtc); 7230 } 7231 7232 /* Only disable port sync and MST slaves */ 7233 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7234 new_crtc_state, i) { 7235 if (!intel_crtc_needs_modeset(new_crtc_state)) 7236 continue; 7237 7238 if (!old_crtc_state->hw.active) 7239 continue; 7240 7241 /* In case of Transcoder port Sync master slave CRTCs can be 7242 * assigned in any order and we need to make sure that 7243 * slave CRTCs are disabled first and then master CRTC since 7244 * Slave vblanks are masked till Master Vblanks. 7245 */ 7246 if (!is_trans_port_sync_slave(old_crtc_state) && 7247 !intel_dp_mst_is_slave_trans(old_crtc_state) && 7248 !intel_crtc_is_bigjoiner_slave(old_crtc_state)) 7249 continue; 7250 7251 intel_old_crtc_state_disables(state, old_crtc_state, 7252 new_crtc_state, crtc); 7253 handled |= BIT(crtc->pipe); 7254 } 7255 7256 /* Disable everything else left on */ 7257 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7258 new_crtc_state, i) { 7259 if (!intel_crtc_needs_modeset(new_crtc_state) || 7260 (handled & BIT(crtc->pipe))) 7261 continue; 7262 7263 if (!old_crtc_state->hw.active) 7264 continue; 7265 7266 intel_old_crtc_state_disables(state, old_crtc_state, 7267 new_crtc_state, crtc); 7268 } 7269 } 7270 7271 static void intel_commit_modeset_enables(struct intel_atomic_state *state) 7272 { 7273 struct intel_crtc_state *new_crtc_state; 7274 struct intel_crtc *crtc; 7275 int i; 7276 7277 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7278 if (!new_crtc_state->hw.active) 7279 continue; 7280 7281 intel_enable_crtc(state, crtc); 7282 intel_update_crtc(state, crtc); 7283 } 7284 } 7285 7286 static void skl_commit_modeset_enables(struct intel_atomic_state *state) 7287 { 7288 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7289 struct intel_crtc *crtc; 7290 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 7291 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 7292 u8 update_pipes = 0, modeset_pipes = 0; 7293 int i; 7294 7295 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7296 enum pipe pipe = crtc->pipe; 7297 7298 if (!new_crtc_state->hw.active) 7299 continue; 7300 7301 /* ignore allocations for crtc's that have been turned off. */ 7302 if (!intel_crtc_needs_modeset(new_crtc_state)) { 7303 entries[pipe] = old_crtc_state->wm.skl.ddb; 7304 update_pipes |= BIT(pipe); 7305 } else { 7306 modeset_pipes |= BIT(pipe); 7307 } 7308 } 7309 7310 /* 7311 * Whenever the number of active pipes changes, we need to make sure we 7312 * update the pipes in the right order so that their ddb allocations 7313 * never overlap with each other between CRTC updates. Otherwise we'll 7314 * cause pipe underruns and other bad stuff. 7315 * 7316 * So first lets enable all pipes that do not need a fullmodeset as 7317 * those don't have any external dependency. 7318 */ 7319 while (update_pipes) { 7320 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7321 new_crtc_state, i) { 7322 enum pipe pipe = crtc->pipe; 7323 7324 if ((update_pipes & BIT(pipe)) == 0) 7325 continue; 7326 7327 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 7328 entries, I915_MAX_PIPES, pipe)) 7329 continue; 7330 7331 entries[pipe] = new_crtc_state->wm.skl.ddb; 7332 update_pipes &= ~BIT(pipe); 7333 7334 intel_update_crtc(state, crtc); 7335 7336 /* 7337 * If this is an already active pipe, it's DDB changed, 7338 * and this isn't the last pipe that needs updating 7339 * then we need to wait for a vblank to pass for the 7340 * new ddb allocation to take effect. 7341 */ 7342 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 7343 &old_crtc_state->wm.skl.ddb) && 7344 (update_pipes | modeset_pipes)) 7345 intel_crtc_wait_for_next_vblank(crtc); 7346 } 7347 } 7348 7349 update_pipes = modeset_pipes; 7350 7351 /* 7352 * Enable all pipes that needs a modeset and do not depends on other 7353 * pipes 7354 */ 7355 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7356 enum pipe pipe = crtc->pipe; 7357 7358 if ((modeset_pipes & BIT(pipe)) == 0) 7359 continue; 7360 7361 if (intel_dp_mst_is_slave_trans(new_crtc_state) || 7362 is_trans_port_sync_master(new_crtc_state) || 7363 intel_crtc_is_bigjoiner_master(new_crtc_state)) 7364 continue; 7365 7366 modeset_pipes &= ~BIT(pipe); 7367 7368 intel_enable_crtc(state, crtc); 7369 } 7370 7371 /* 7372 * Then we enable all remaining pipes that depend on other 7373 * pipes: MST slaves and port sync masters, big joiner master 7374 */ 7375 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7376 enum pipe pipe = crtc->pipe; 7377 7378 if ((modeset_pipes & BIT(pipe)) == 0) 7379 continue; 7380 7381 modeset_pipes &= ~BIT(pipe); 7382 7383 intel_enable_crtc(state, crtc); 7384 } 7385 7386 /* 7387 * Finally we do the plane updates/etc. for all pipes that got enabled. 7388 */ 7389 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7390 enum pipe pipe = crtc->pipe; 7391 7392 if ((update_pipes & BIT(pipe)) == 0) 7393 continue; 7394 7395 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 7396 entries, I915_MAX_PIPES, pipe)); 7397 7398 entries[pipe] = new_crtc_state->wm.skl.ddb; 7399 update_pipes &= ~BIT(pipe); 7400 7401 intel_update_crtc(state, crtc); 7402 } 7403 7404 drm_WARN_ON(&dev_priv->drm, modeset_pipes); 7405 drm_WARN_ON(&dev_priv->drm, update_pipes); 7406 } 7407 7408 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 7409 { 7410 struct intel_atomic_state *state, *next; 7411 struct llist_node *freed; 7412 7413 freed = llist_del_all(&dev_priv->atomic_helper.free_list); 7414 llist_for_each_entry_safe(state, next, freed, freed) 7415 drm_atomic_state_put(&state->base); 7416 } 7417 7418 static void intel_atomic_helper_free_state_worker(struct work_struct *work) 7419 { 7420 struct drm_i915_private *dev_priv = 7421 container_of(work, typeof(*dev_priv), atomic_helper.free_work); 7422 7423 intel_atomic_helper_free_state(dev_priv); 7424 } 7425 7426 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 7427 { 7428 struct wait_queue_entry wait_fence, wait_reset; 7429 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); 7430 7431 init_wait_entry(&wait_fence, 0); 7432 init_wait_entry(&wait_reset, 0); 7433 for (;;) { 7434 prepare_to_wait(&intel_state->commit_ready.wait, 7435 &wait_fence, TASK_UNINTERRUPTIBLE); 7436 prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, 7437 I915_RESET_MODESET), 7438 &wait_reset, TASK_UNINTERRUPTIBLE); 7439 7440 7441 if (i915_sw_fence_done(&intel_state->commit_ready) || 7442 test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags)) 7443 break; 7444 7445 schedule(); 7446 } 7447 finish_wait(&intel_state->commit_ready.wait, &wait_fence); 7448 finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, 7449 I915_RESET_MODESET), 7450 &wait_reset); 7451 } 7452 7453 static void intel_cleanup_dsbs(struct intel_atomic_state *state) 7454 { 7455 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 7456 struct intel_crtc *crtc; 7457 int i; 7458 7459 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7460 new_crtc_state, i) 7461 intel_dsb_cleanup(old_crtc_state); 7462 } 7463 7464 static void intel_atomic_cleanup_work(struct work_struct *work) 7465 { 7466 struct intel_atomic_state *state = 7467 container_of(work, struct intel_atomic_state, base.commit_work); 7468 struct drm_i915_private *i915 = to_i915(state->base.dev); 7469 7470 intel_cleanup_dsbs(state); 7471 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); 7472 drm_atomic_helper_commit_cleanup_done(&state->base); 7473 drm_atomic_state_put(&state->base); 7474 7475 intel_atomic_helper_free_state(i915); 7476 } 7477 7478 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state) 7479 { 7480 struct drm_i915_private *i915 = to_i915(state->base.dev); 7481 struct intel_plane *plane; 7482 struct intel_plane_state *plane_state; 7483 int i; 7484 7485 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 7486 struct drm_framebuffer *fb = plane_state->hw.fb; 7487 int cc_plane; 7488 int ret; 7489 7490 if (!fb) 7491 continue; 7492 7493 cc_plane = intel_fb_rc_ccs_cc_plane(fb); 7494 if (cc_plane < 0) 7495 continue; 7496 7497 /* 7498 * The layout of the fast clear color value expected by HW 7499 * (the DRM ABI requiring this value to be located in fb at 7500 * offset 0 of cc plane, plane #2 previous generations or 7501 * plane #1 for flat ccs): 7502 * - 4 x 4 bytes per-channel value 7503 * (in surface type specific float/int format provided by the fb user) 7504 * - 8 bytes native color value used by the display 7505 * (converted/written by GPU during a fast clear operation using the 7506 * above per-channel values) 7507 * 7508 * The commit's FB prepare hook already ensured that FB obj is pinned and the 7509 * caller made sure that the object is synced wrt. the related color clear value 7510 * GPU write on it. 7511 */ 7512 ret = i915_gem_object_read_from_page(intel_fb_obj(fb), 7513 fb->offsets[cc_plane] + 16, 7514 &plane_state->ccval, 7515 sizeof(plane_state->ccval)); 7516 /* The above could only fail if the FB obj has an unexpected backing store type. */ 7517 drm_WARN_ON(&i915->drm, ret); 7518 } 7519 } 7520 7521 static void intel_atomic_commit_tail(struct intel_atomic_state *state) 7522 { 7523 struct drm_device *dev = state->base.dev; 7524 struct drm_i915_private *dev_priv = to_i915(dev); 7525 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 7526 struct intel_crtc *crtc; 7527 struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; 7528 intel_wakeref_t wakeref = 0; 7529 int i; 7530 7531 intel_atomic_commit_fence_wait(state); 7532 7533 drm_atomic_helper_wait_for_dependencies(&state->base); 7534 drm_dp_mst_atomic_wait_for_dependencies(&state->base); 7535 7536 if (state->modeset) 7537 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 7538 7539 intel_atomic_prepare_plane_clear_colors(state); 7540 7541 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7542 new_crtc_state, i) { 7543 if (intel_crtc_needs_modeset(new_crtc_state) || 7544 new_crtc_state->update_pipe) { 7545 intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]); 7546 } 7547 } 7548 7549 intel_commit_modeset_disables(state); 7550 7551 /* FIXME: Eventually get rid of our crtc->config pointer */ 7552 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7553 crtc->config = new_crtc_state; 7554 7555 if (state->modeset) { 7556 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); 7557 7558 intel_set_cdclk_pre_plane_update(state); 7559 7560 intel_modeset_verify_disabled(dev_priv, state); 7561 } 7562 7563 intel_sagv_pre_plane_update(state); 7564 7565 /* Complete the events for pipes that have now been disabled */ 7566 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7567 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 7568 7569 /* Complete events for now disable pipes here. */ 7570 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) { 7571 spin_lock_irq(&dev->event_lock); 7572 drm_crtc_send_vblank_event(&crtc->base, 7573 new_crtc_state->uapi.event); 7574 spin_unlock_irq(&dev->event_lock); 7575 7576 new_crtc_state->uapi.event = NULL; 7577 } 7578 } 7579 7580 intel_encoders_update_prepare(state); 7581 7582 intel_dbuf_pre_plane_update(state); 7583 intel_mbus_dbox_update(state); 7584 7585 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7586 if (new_crtc_state->do_async_flip) 7587 intel_crtc_enable_flip_done(state, crtc); 7588 } 7589 7590 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 7591 dev_priv->display->commit_modeset_enables(state); 7592 7593 intel_encoders_update_complete(state); 7594 7595 if (state->modeset) 7596 intel_set_cdclk_post_plane_update(state); 7597 7598 intel_wait_for_vblank_workers(state); 7599 7600 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 7601 * already, but still need the state for the delayed optimization. To 7602 * fix this: 7603 * - wrap the optimization/post_plane_update stuff into a per-crtc work. 7604 * - schedule that vblank worker _before_ calling hw_done 7605 * - at the start of commit_tail, cancel it _synchrously 7606 * - switch over to the vblank wait helper in the core after that since 7607 * we don't need out special handling any more. 7608 */ 7609 drm_atomic_helper_wait_for_flip_done(dev, &state->base); 7610 7611 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7612 if (new_crtc_state->do_async_flip) 7613 intel_crtc_disable_flip_done(state, crtc); 7614 } 7615 7616 /* 7617 * Now that the vblank has passed, we can go ahead and program the 7618 * optimal watermarks on platforms that need two-step watermark 7619 * programming. 7620 * 7621 * TODO: Move this (and other cleanup) to an async worker eventually. 7622 */ 7623 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7624 new_crtc_state, i) { 7625 /* 7626 * Gen2 reports pipe underruns whenever all planes are disabled. 7627 * So re-enable underrun reporting after some planes get enabled. 7628 * 7629 * We do this before .optimize_watermarks() so that we have a 7630 * chance of catching underruns with the intermediate watermarks 7631 * vs. the new plane configuration. 7632 */ 7633 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state)) 7634 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 7635 7636 intel_optimize_watermarks(state, crtc); 7637 } 7638 7639 intel_dbuf_post_plane_update(state); 7640 intel_psr_post_plane_update(state); 7641 7642 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7643 intel_post_plane_update(state, crtc); 7644 7645 intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]); 7646 7647 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); 7648 7649 /* 7650 * DSB cleanup is done in cleanup_work aligning with framebuffer 7651 * cleanup. So copy and reset the dsb structure to sync with 7652 * commit_done and later do dsb cleanup in cleanup_work. 7653 */ 7654 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb); 7655 } 7656 7657 /* Underruns don't always raise interrupts, so check manually */ 7658 intel_check_cpu_fifo_underruns(dev_priv); 7659 intel_check_pch_fifo_underruns(dev_priv); 7660 7661 if (state->modeset) 7662 intel_verify_planes(state); 7663 7664 intel_sagv_post_plane_update(state); 7665 7666 drm_atomic_helper_commit_hw_done(&state->base); 7667 7668 if (state->modeset) { 7669 /* As one of the primary mmio accessors, KMS has a high 7670 * likelihood of triggering bugs in unclaimed access. After we 7671 * finish modesetting, see if an error has been flagged, and if 7672 * so enable debugging for the next modeset - and hope we catch 7673 * the culprit. 7674 */ 7675 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 7676 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); 7677 } 7678 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7679 7680 /* 7681 * Defer the cleanup of the old state to a separate worker to not 7682 * impede the current task (userspace for blocking modesets) that 7683 * are executed inline. For out-of-line asynchronous modesets/flips, 7684 * deferring to a new worker seems overkill, but we would place a 7685 * schedule point (cond_resched()) here anyway to keep latencies 7686 * down. 7687 */ 7688 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 7689 queue_work(system_highpri_wq, &state->base.commit_work); 7690 } 7691 7692 static void intel_atomic_commit_work(struct work_struct *work) 7693 { 7694 struct intel_atomic_state *state = 7695 container_of(work, struct intel_atomic_state, base.commit_work); 7696 7697 intel_atomic_commit_tail(state); 7698 } 7699 7700 static int 7701 intel_atomic_commit_ready(struct i915_sw_fence *fence, 7702 enum i915_sw_fence_notify notify) 7703 { 7704 struct intel_atomic_state *state = 7705 container_of(fence, struct intel_atomic_state, commit_ready); 7706 7707 switch (notify) { 7708 case FENCE_COMPLETE: 7709 /* we do blocking waits in the worker, nothing to do here */ 7710 break; 7711 case FENCE_FREE: 7712 { 7713 struct intel_atomic_helper *helper = 7714 &to_i915(state->base.dev)->atomic_helper; 7715 7716 if (llist_add(&state->freed, &helper->free_list)) 7717 schedule_work(&helper->free_work); 7718 break; 7719 } 7720 } 7721 7722 return NOTIFY_DONE; 7723 } 7724 7725 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 7726 { 7727 struct intel_plane_state *old_plane_state, *new_plane_state; 7728 struct intel_plane *plane; 7729 int i; 7730 7731 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 7732 new_plane_state, i) 7733 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 7734 to_intel_frontbuffer(new_plane_state->hw.fb), 7735 plane->frontbuffer_bit); 7736 } 7737 7738 static int intel_atomic_commit(struct drm_device *dev, 7739 struct drm_atomic_state *_state, 7740 bool nonblock) 7741 { 7742 struct intel_atomic_state *state = to_intel_atomic_state(_state); 7743 struct drm_i915_private *dev_priv = to_i915(dev); 7744 int ret = 0; 7745 7746 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 7747 7748 drm_atomic_state_get(&state->base); 7749 i915_sw_fence_init(&state->commit_ready, 7750 intel_atomic_commit_ready); 7751 7752 /* 7753 * The intel_legacy_cursor_update() fast path takes care 7754 * of avoiding the vblank waits for simple cursor 7755 * movement and flips. For cursor on/off and size changes, 7756 * we want to perform the vblank waits so that watermark 7757 * updates happen during the correct frames. Gen9+ have 7758 * double buffered watermarks and so shouldn't need this. 7759 * 7760 * Unset state->legacy_cursor_update before the call to 7761 * drm_atomic_helper_setup_commit() because otherwise 7762 * drm_atomic_helper_wait_for_flip_done() is a noop and 7763 * we get FIFO underruns because we didn't wait 7764 * for vblank. 7765 * 7766 * FIXME doing watermarks and fb cleanup from a vblank worker 7767 * (assuming we had any) would solve these problems. 7768 */ 7769 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) { 7770 struct intel_crtc_state *new_crtc_state; 7771 struct intel_crtc *crtc; 7772 int i; 7773 7774 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7775 if (new_crtc_state->wm.need_postvbl_update || 7776 new_crtc_state->update_wm_post) 7777 state->base.legacy_cursor_update = false; 7778 } 7779 7780 ret = intel_atomic_prepare_commit(state); 7781 if (ret) { 7782 drm_dbg_atomic(&dev_priv->drm, 7783 "Preparing state failed with %i\n", ret); 7784 i915_sw_fence_commit(&state->commit_ready); 7785 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7786 return ret; 7787 } 7788 7789 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 7790 if (!ret) 7791 ret = drm_atomic_helper_swap_state(&state->base, true); 7792 if (!ret) 7793 intel_atomic_swap_global_state(state); 7794 7795 if (ret) { 7796 struct intel_crtc_state *new_crtc_state; 7797 struct intel_crtc *crtc; 7798 int i; 7799 7800 i915_sw_fence_commit(&state->commit_ready); 7801 7802 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7803 intel_dsb_cleanup(new_crtc_state); 7804 7805 drm_atomic_helper_cleanup_planes(dev, &state->base); 7806 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7807 return ret; 7808 } 7809 intel_shared_dpll_swap_state(state); 7810 intel_atomic_track_fbs(state); 7811 7812 drm_atomic_state_get(&state->base); 7813 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 7814 7815 i915_sw_fence_commit(&state->commit_ready); 7816 if (nonblock && state->modeset) { 7817 queue_work(dev_priv->modeset_wq, &state->base.commit_work); 7818 } else if (nonblock) { 7819 queue_work(dev_priv->flip_wq, &state->base.commit_work); 7820 } else { 7821 if (state->modeset) 7822 flush_workqueue(dev_priv->modeset_wq); 7823 intel_atomic_commit_tail(state); 7824 } 7825 7826 return 0; 7827 } 7828 7829 /** 7830 * intel_plane_destroy - destroy a plane 7831 * @plane: plane to destroy 7832 * 7833 * Common destruction function for all types of planes (primary, cursor, 7834 * sprite). 7835 */ 7836 void intel_plane_destroy(struct drm_plane *plane) 7837 { 7838 drm_plane_cleanup(plane); 7839 kfree(to_intel_plane(plane)); 7840 } 7841 7842 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) 7843 { 7844 struct intel_plane *plane; 7845 7846 for_each_intel_plane(&dev_priv->drm, plane) { 7847 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, 7848 plane->pipe); 7849 7850 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); 7851 } 7852 } 7853 7854 7855 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 7856 struct drm_file *file) 7857 { 7858 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 7859 struct drm_crtc *drmmode_crtc; 7860 struct intel_crtc *crtc; 7861 7862 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); 7863 if (!drmmode_crtc) 7864 return -ENOENT; 7865 7866 crtc = to_intel_crtc(drmmode_crtc); 7867 pipe_from_crtc_id->pipe = crtc->pipe; 7868 7869 return 0; 7870 } 7871 7872 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) 7873 { 7874 struct drm_device *dev = encoder->base.dev; 7875 struct intel_encoder *source_encoder; 7876 u32 possible_clones = 0; 7877 7878 for_each_intel_encoder(dev, source_encoder) { 7879 if (encoders_cloneable(encoder, source_encoder)) 7880 possible_clones |= drm_encoder_mask(&source_encoder->base); 7881 } 7882 7883 return possible_clones; 7884 } 7885 7886 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) 7887 { 7888 struct drm_device *dev = encoder->base.dev; 7889 struct intel_crtc *crtc; 7890 u32 possible_crtcs = 0; 7891 7892 for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask) 7893 possible_crtcs |= drm_crtc_mask(&crtc->base); 7894 7895 return possible_crtcs; 7896 } 7897 7898 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 7899 { 7900 if (!IS_MOBILE(dev_priv)) 7901 return false; 7902 7903 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0) 7904 return false; 7905 7906 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE)) 7907 return false; 7908 7909 return true; 7910 } 7911 7912 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 7913 { 7914 if (DISPLAY_VER(dev_priv) >= 9) 7915 return false; 7916 7917 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 7918 return false; 7919 7920 if (HAS_PCH_LPT_H(dev_priv) && 7921 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 7922 return false; 7923 7924 /* DDI E can't be used if DDI A requires 4 lanes */ 7925 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 7926 return false; 7927 7928 if (!dev_priv->vbt.int_crt_support) 7929 return false; 7930 7931 return true; 7932 } 7933 7934 static void intel_setup_outputs(struct drm_i915_private *dev_priv) 7935 { 7936 struct intel_encoder *encoder; 7937 bool dpd_is_edp = false; 7938 7939 intel_pps_unlock_regs_wa(dev_priv); 7940 7941 if (!HAS_DISPLAY(dev_priv)) 7942 return; 7943 7944 if (IS_DG2(dev_priv)) { 7945 intel_ddi_init(dev_priv, PORT_A); 7946 intel_ddi_init(dev_priv, PORT_B); 7947 intel_ddi_init(dev_priv, PORT_C); 7948 intel_ddi_init(dev_priv, PORT_D_XELPD); 7949 intel_ddi_init(dev_priv, PORT_TC1); 7950 } else if (IS_ALDERLAKE_P(dev_priv)) { 7951 intel_ddi_init(dev_priv, PORT_A); 7952 intel_ddi_init(dev_priv, PORT_B); 7953 intel_ddi_init(dev_priv, PORT_TC1); 7954 intel_ddi_init(dev_priv, PORT_TC2); 7955 intel_ddi_init(dev_priv, PORT_TC3); 7956 intel_ddi_init(dev_priv, PORT_TC4); 7957 icl_dsi_init(dev_priv); 7958 } else if (IS_ALDERLAKE_S(dev_priv)) { 7959 intel_ddi_init(dev_priv, PORT_A); 7960 intel_ddi_init(dev_priv, PORT_TC1); 7961 intel_ddi_init(dev_priv, PORT_TC2); 7962 intel_ddi_init(dev_priv, PORT_TC3); 7963 intel_ddi_init(dev_priv, PORT_TC4); 7964 } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) { 7965 intel_ddi_init(dev_priv, PORT_A); 7966 intel_ddi_init(dev_priv, PORT_B); 7967 intel_ddi_init(dev_priv, PORT_TC1); 7968 intel_ddi_init(dev_priv, PORT_TC2); 7969 } else if (DISPLAY_VER(dev_priv) >= 12) { 7970 intel_ddi_init(dev_priv, PORT_A); 7971 intel_ddi_init(dev_priv, PORT_B); 7972 intel_ddi_init(dev_priv, PORT_TC1); 7973 intel_ddi_init(dev_priv, PORT_TC2); 7974 intel_ddi_init(dev_priv, PORT_TC3); 7975 intel_ddi_init(dev_priv, PORT_TC4); 7976 intel_ddi_init(dev_priv, PORT_TC5); 7977 intel_ddi_init(dev_priv, PORT_TC6); 7978 icl_dsi_init(dev_priv); 7979 } else if (IS_JSL_EHL(dev_priv)) { 7980 intel_ddi_init(dev_priv, PORT_A); 7981 intel_ddi_init(dev_priv, PORT_B); 7982 intel_ddi_init(dev_priv, PORT_C); 7983 intel_ddi_init(dev_priv, PORT_D); 7984 icl_dsi_init(dev_priv); 7985 } else if (DISPLAY_VER(dev_priv) == 11) { 7986 intel_ddi_init(dev_priv, PORT_A); 7987 intel_ddi_init(dev_priv, PORT_B); 7988 intel_ddi_init(dev_priv, PORT_C); 7989 intel_ddi_init(dev_priv, PORT_D); 7990 intel_ddi_init(dev_priv, PORT_E); 7991 intel_ddi_init(dev_priv, PORT_F); 7992 icl_dsi_init(dev_priv); 7993 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 7994 intel_ddi_init(dev_priv, PORT_A); 7995 intel_ddi_init(dev_priv, PORT_B); 7996 intel_ddi_init(dev_priv, PORT_C); 7997 vlv_dsi_init(dev_priv); 7998 } else if (DISPLAY_VER(dev_priv) >= 9) { 7999 intel_ddi_init(dev_priv, PORT_A); 8000 intel_ddi_init(dev_priv, PORT_B); 8001 intel_ddi_init(dev_priv, PORT_C); 8002 intel_ddi_init(dev_priv, PORT_D); 8003 intel_ddi_init(dev_priv, PORT_E); 8004 } else if (HAS_DDI(dev_priv)) { 8005 u32 found; 8006 8007 if (intel_ddi_crt_present(dev_priv)) 8008 intel_crt_init(dev_priv); 8009 8010 /* Haswell uses DDI functions to detect digital outputs. */ 8011 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 8012 if (found) 8013 intel_ddi_init(dev_priv, PORT_A); 8014 8015 found = intel_de_read(dev_priv, SFUSE_STRAP); 8016 if (found & SFUSE_STRAP_DDIB_DETECTED) 8017 intel_ddi_init(dev_priv, PORT_B); 8018 if (found & SFUSE_STRAP_DDIC_DETECTED) 8019 intel_ddi_init(dev_priv, PORT_C); 8020 if (found & SFUSE_STRAP_DDID_DETECTED) 8021 intel_ddi_init(dev_priv, PORT_D); 8022 if (found & SFUSE_STRAP_DDIF_DETECTED) 8023 intel_ddi_init(dev_priv, PORT_F); 8024 } else if (HAS_PCH_SPLIT(dev_priv)) { 8025 int found; 8026 8027 /* 8028 * intel_edp_init_connector() depends on this completing first, 8029 * to prevent the registration of both eDP and LVDS and the 8030 * incorrect sharing of the PPS. 8031 */ 8032 intel_lvds_init(dev_priv); 8033 intel_crt_init(dev_priv); 8034 8035 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); 8036 8037 if (ilk_has_edp_a(dev_priv)) 8038 g4x_dp_init(dev_priv, DP_A, PORT_A); 8039 8040 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) { 8041 /* PCH SDVOB multiplex with HDMIB */ 8042 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 8043 if (!found) 8044 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 8045 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED)) 8046 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B); 8047 } 8048 8049 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED) 8050 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 8051 8052 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED) 8053 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 8054 8055 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED) 8056 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C); 8057 8058 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED) 8059 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D); 8060 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 8061 bool has_edp, has_port; 8062 8063 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support) 8064 intel_crt_init(dev_priv); 8065 8066 /* 8067 * The DP_DETECTED bit is the latched state of the DDC 8068 * SDA pin at boot. However since eDP doesn't require DDC 8069 * (no way to plug in a DP->HDMI dongle) the DDC pins for 8070 * eDP ports may have been muxed to an alternate function. 8071 * Thus we can't rely on the DP_DETECTED bit alone to detect 8072 * eDP ports. Consult the VBT as well as DP_DETECTED to 8073 * detect eDP ports. 8074 * 8075 * Sadly the straps seem to be missing sometimes even for HDMI 8076 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap 8077 * and VBT for the presence of the port. Additionally we can't 8078 * trust the port type the VBT declares as we've seen at least 8079 * HDMI ports that the VBT claim are DP or eDP. 8080 */ 8081 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 8082 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 8083 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port) 8084 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B); 8085 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 8086 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 8087 8088 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 8089 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 8090 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port) 8091 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C); 8092 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 8093 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 8094 8095 if (IS_CHERRYVIEW(dev_priv)) { 8096 /* 8097 * eDP not supported on port D, 8098 * so no need to worry about it 8099 */ 8100 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 8101 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port) 8102 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D); 8103 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port) 8104 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 8105 } 8106 8107 vlv_dsi_init(dev_priv); 8108 } else if (IS_PINEVIEW(dev_priv)) { 8109 intel_lvds_init(dev_priv); 8110 intel_crt_init(dev_priv); 8111 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) { 8112 bool found = false; 8113 8114 if (IS_MOBILE(dev_priv)) 8115 intel_lvds_init(dev_priv); 8116 8117 intel_crt_init(dev_priv); 8118 8119 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 8120 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); 8121 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 8122 if (!found && IS_G4X(dev_priv)) { 8123 drm_dbg_kms(&dev_priv->drm, 8124 "probing HDMI on SDVOB\n"); 8125 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 8126 } 8127 8128 if (!found && IS_G4X(dev_priv)) 8129 g4x_dp_init(dev_priv, DP_B, PORT_B); 8130 } 8131 8132 /* Before G4X SDVOC doesn't have its own detect register */ 8133 8134 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 8135 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n"); 8136 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 8137 } 8138 8139 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) { 8140 8141 if (IS_G4X(dev_priv)) { 8142 drm_dbg_kms(&dev_priv->drm, 8143 "probing HDMI on SDVOC\n"); 8144 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 8145 } 8146 if (IS_G4X(dev_priv)) 8147 g4x_dp_init(dev_priv, DP_C, PORT_C); 8148 } 8149 8150 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED)) 8151 g4x_dp_init(dev_priv, DP_D, PORT_D); 8152 8153 if (SUPPORTS_TV(dev_priv)) 8154 intel_tv_init(dev_priv); 8155 } else if (DISPLAY_VER(dev_priv) == 2) { 8156 if (IS_I85X(dev_priv)) 8157 intel_lvds_init(dev_priv); 8158 8159 intel_crt_init(dev_priv); 8160 intel_dvo_init(dev_priv); 8161 } 8162 8163 for_each_intel_encoder(&dev_priv->drm, encoder) { 8164 encoder->base.possible_crtcs = 8165 intel_encoder_possible_crtcs(encoder); 8166 encoder->base.possible_clones = 8167 intel_encoder_possible_clones(encoder); 8168 } 8169 8170 intel_init_pch_refclk(dev_priv); 8171 8172 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 8173 } 8174 8175 static enum drm_mode_status 8176 intel_mode_valid(struct drm_device *dev, 8177 const struct drm_display_mode *mode) 8178 { 8179 struct drm_i915_private *dev_priv = to_i915(dev); 8180 int hdisplay_max, htotal_max; 8181 int vdisplay_max, vtotal_max; 8182 8183 /* 8184 * Can't reject DBLSCAN here because Xorg ddxen can add piles 8185 * of DBLSCAN modes to the output's mode list when they detect 8186 * the scaling mode property on the connector. And they don't 8187 * ask the kernel to validate those modes in any way until 8188 * modeset time at which point the client gets a protocol error. 8189 * So in order to not upset those clients we silently ignore the 8190 * DBLSCAN flag on such connectors. For other connectors we will 8191 * reject modes with the DBLSCAN flag in encoder->compute_config(). 8192 * And we always reject DBLSCAN modes in connector->mode_valid() 8193 * as we never want such modes on the connector's mode list. 8194 */ 8195 8196 if (mode->vscan > 1) 8197 return MODE_NO_VSCAN; 8198 8199 if (mode->flags & DRM_MODE_FLAG_HSKEW) 8200 return MODE_H_ILLEGAL; 8201 8202 if (mode->flags & (DRM_MODE_FLAG_CSYNC | 8203 DRM_MODE_FLAG_NCSYNC | 8204 DRM_MODE_FLAG_PCSYNC)) 8205 return MODE_HSYNC; 8206 8207 if (mode->flags & (DRM_MODE_FLAG_BCAST | 8208 DRM_MODE_FLAG_PIXMUX | 8209 DRM_MODE_FLAG_CLKDIV2)) 8210 return MODE_BAD; 8211 8212 /* Transcoder timing limits */ 8213 if (DISPLAY_VER(dev_priv) >= 11) { 8214 hdisplay_max = 16384; 8215 vdisplay_max = 8192; 8216 htotal_max = 16384; 8217 vtotal_max = 8192; 8218 } else if (DISPLAY_VER(dev_priv) >= 9 || 8219 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 8220 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ 8221 vdisplay_max = 4096; 8222 htotal_max = 8192; 8223 vtotal_max = 8192; 8224 } else if (DISPLAY_VER(dev_priv) >= 3) { 8225 hdisplay_max = 4096; 8226 vdisplay_max = 4096; 8227 htotal_max = 8192; 8228 vtotal_max = 8192; 8229 } else { 8230 hdisplay_max = 2048; 8231 vdisplay_max = 2048; 8232 htotal_max = 4096; 8233 vtotal_max = 4096; 8234 } 8235 8236 if (mode->hdisplay > hdisplay_max || 8237 mode->hsync_start > htotal_max || 8238 mode->hsync_end > htotal_max || 8239 mode->htotal > htotal_max) 8240 return MODE_H_ILLEGAL; 8241 8242 if (mode->vdisplay > vdisplay_max || 8243 mode->vsync_start > vtotal_max || 8244 mode->vsync_end > vtotal_max || 8245 mode->vtotal > vtotal_max) 8246 return MODE_V_ILLEGAL; 8247 8248 if (DISPLAY_VER(dev_priv) >= 5) { 8249 if (mode->hdisplay < 64 || 8250 mode->htotal - mode->hdisplay < 32) 8251 return MODE_H_ILLEGAL; 8252 8253 if (mode->vtotal - mode->vdisplay < 5) 8254 return MODE_V_ILLEGAL; 8255 } else { 8256 if (mode->htotal - mode->hdisplay < 32) 8257 return MODE_H_ILLEGAL; 8258 8259 if (mode->vtotal - mode->vdisplay < 3) 8260 return MODE_V_ILLEGAL; 8261 } 8262 8263 /* 8264 * Cantiga+ cannot handle modes with a hsync front porch of 0. 8265 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 8266 */ 8267 if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) && 8268 mode->hsync_start == mode->hdisplay) 8269 return MODE_H_ILLEGAL; 8270 8271 return MODE_OK; 8272 } 8273 8274 enum drm_mode_status 8275 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, 8276 const struct drm_display_mode *mode, 8277 bool bigjoiner) 8278 { 8279 int plane_width_max, plane_height_max; 8280 8281 /* 8282 * intel_mode_valid() should be 8283 * sufficient on older platforms. 8284 */ 8285 if (DISPLAY_VER(dev_priv) < 9) 8286 return MODE_OK; 8287 8288 /* 8289 * Most people will probably want a fullscreen 8290 * plane so let's not advertize modes that are 8291 * too big for that. 8292 */ 8293 if (DISPLAY_VER(dev_priv) >= 11) { 8294 plane_width_max = 5120 << bigjoiner; 8295 plane_height_max = 4320; 8296 } else { 8297 plane_width_max = 5120; 8298 plane_height_max = 4096; 8299 } 8300 8301 if (mode->hdisplay > plane_width_max) 8302 return MODE_H_ILLEGAL; 8303 8304 if (mode->vdisplay > plane_height_max) 8305 return MODE_V_ILLEGAL; 8306 8307 return MODE_OK; 8308 } 8309 8310 static const struct drm_mode_config_funcs intel_mode_funcs = { 8311 .fb_create = intel_user_framebuffer_create, 8312 .get_format_info = intel_fb_get_format_info, 8313 .output_poll_changed = intel_fbdev_output_poll_changed, 8314 .mode_valid = intel_mode_valid, 8315 .atomic_check = intel_atomic_check, 8316 .atomic_commit = intel_atomic_commit, 8317 .atomic_state_alloc = intel_atomic_state_alloc, 8318 .atomic_state_clear = intel_atomic_state_clear, 8319 .atomic_state_free = intel_atomic_state_free, 8320 }; 8321 8322 static const struct drm_i915_display_funcs skl_display_funcs = { 8323 .get_pipe_config = hsw_get_pipe_config, 8324 .crtc_enable = hsw_crtc_enable, 8325 .crtc_disable = hsw_crtc_disable, 8326 .commit_modeset_enables = skl_commit_modeset_enables, 8327 .get_initial_plane_config = skl_get_initial_plane_config, 8328 }; 8329 8330 static const struct drm_i915_display_funcs ddi_display_funcs = { 8331 .get_pipe_config = hsw_get_pipe_config, 8332 .crtc_enable = hsw_crtc_enable, 8333 .crtc_disable = hsw_crtc_disable, 8334 .commit_modeset_enables = intel_commit_modeset_enables, 8335 .get_initial_plane_config = i9xx_get_initial_plane_config, 8336 }; 8337 8338 static const struct drm_i915_display_funcs pch_split_display_funcs = { 8339 .get_pipe_config = ilk_get_pipe_config, 8340 .crtc_enable = ilk_crtc_enable, 8341 .crtc_disable = ilk_crtc_disable, 8342 .commit_modeset_enables = intel_commit_modeset_enables, 8343 .get_initial_plane_config = i9xx_get_initial_plane_config, 8344 }; 8345 8346 static const struct drm_i915_display_funcs vlv_display_funcs = { 8347 .get_pipe_config = i9xx_get_pipe_config, 8348 .crtc_enable = valleyview_crtc_enable, 8349 .crtc_disable = i9xx_crtc_disable, 8350 .commit_modeset_enables = intel_commit_modeset_enables, 8351 .get_initial_plane_config = i9xx_get_initial_plane_config, 8352 }; 8353 8354 static const struct drm_i915_display_funcs i9xx_display_funcs = { 8355 .get_pipe_config = i9xx_get_pipe_config, 8356 .crtc_enable = i9xx_crtc_enable, 8357 .crtc_disable = i9xx_crtc_disable, 8358 .commit_modeset_enables = intel_commit_modeset_enables, 8359 .get_initial_plane_config = i9xx_get_initial_plane_config, 8360 }; 8361 8362 /** 8363 * intel_init_display_hooks - initialize the display modesetting hooks 8364 * @dev_priv: device private 8365 */ 8366 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 8367 { 8368 if (!HAS_DISPLAY(dev_priv)) 8369 return; 8370 8371 intel_init_cdclk_hooks(dev_priv); 8372 intel_audio_hooks_init(dev_priv); 8373 8374 intel_dpll_init_clock_hook(dev_priv); 8375 8376 if (DISPLAY_VER(dev_priv) >= 9) { 8377 dev_priv->display = &skl_display_funcs; 8378 } else if (HAS_DDI(dev_priv)) { 8379 dev_priv->display = &ddi_display_funcs; 8380 } else if (HAS_PCH_SPLIT(dev_priv)) { 8381 dev_priv->display = &pch_split_display_funcs; 8382 } else if (IS_CHERRYVIEW(dev_priv) || 8383 IS_VALLEYVIEW(dev_priv)) { 8384 dev_priv->display = &vlv_display_funcs; 8385 } else { 8386 dev_priv->display = &i9xx_display_funcs; 8387 } 8388 8389 intel_fdi_init_hook(dev_priv); 8390 } 8391 8392 void intel_modeset_init_hw(struct drm_i915_private *i915) 8393 { 8394 struct intel_cdclk_state *cdclk_state; 8395 8396 if (!HAS_DISPLAY(i915)) 8397 return; 8398 8399 cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state); 8400 8401 intel_update_cdclk(i915); 8402 intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK"); 8403 cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw; 8404 } 8405 8406 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state) 8407 { 8408 struct drm_plane *plane; 8409 struct intel_crtc *crtc; 8410 8411 for_each_intel_crtc(state->dev, crtc) { 8412 struct intel_crtc_state *crtc_state; 8413 8414 crtc_state = intel_atomic_get_crtc_state(state, crtc); 8415 if (IS_ERR(crtc_state)) 8416 return PTR_ERR(crtc_state); 8417 8418 if (crtc_state->hw.active) { 8419 /* 8420 * Preserve the inherited flag to avoid 8421 * taking the full modeset path. 8422 */ 8423 crtc_state->inherited = true; 8424 } 8425 } 8426 8427 drm_for_each_plane(plane, state->dev) { 8428 struct drm_plane_state *plane_state; 8429 8430 plane_state = drm_atomic_get_plane_state(state, plane); 8431 if (IS_ERR(plane_state)) 8432 return PTR_ERR(plane_state); 8433 } 8434 8435 return 0; 8436 } 8437 8438 /* 8439 * Calculate what we think the watermarks should be for the state we've read 8440 * out of the hardware and then immediately program those watermarks so that 8441 * we ensure the hardware settings match our internal state. 8442 * 8443 * We can calculate what we think WM's should be by creating a duplicate of the 8444 * current state (which was constructed during hardware readout) and running it 8445 * through the atomic check code to calculate new watermark values in the 8446 * state object. 8447 */ 8448 static void sanitize_watermarks(struct drm_i915_private *dev_priv) 8449 { 8450 struct drm_atomic_state *state; 8451 struct intel_atomic_state *intel_state; 8452 struct intel_crtc *crtc; 8453 struct intel_crtc_state *crtc_state; 8454 struct drm_modeset_acquire_ctx ctx; 8455 int ret; 8456 int i; 8457 8458 /* Only supported on platforms that use atomic watermark design */ 8459 if (!dev_priv->wm_disp->optimize_watermarks) 8460 return; 8461 8462 state = drm_atomic_state_alloc(&dev_priv->drm); 8463 if (drm_WARN_ON(&dev_priv->drm, !state)) 8464 return; 8465 8466 intel_state = to_intel_atomic_state(state); 8467 8468 drm_modeset_acquire_init(&ctx, 0); 8469 8470 retry: 8471 state->acquire_ctx = &ctx; 8472 8473 /* 8474 * Hardware readout is the only time we don't want to calculate 8475 * intermediate watermarks (since we don't trust the current 8476 * watermarks). 8477 */ 8478 if (!HAS_GMCH(dev_priv)) 8479 intel_state->skip_intermediate_wm = true; 8480 8481 ret = sanitize_watermarks_add_affected(state); 8482 if (ret) 8483 goto fail; 8484 8485 ret = intel_atomic_check(&dev_priv->drm, state); 8486 if (ret) 8487 goto fail; 8488 8489 /* Write calculated watermark values back */ 8490 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { 8491 crtc_state->wm.need_postvbl_update = true; 8492 intel_optimize_watermarks(intel_state, crtc); 8493 8494 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; 8495 } 8496 8497 fail: 8498 if (ret == -EDEADLK) { 8499 drm_atomic_state_clear(state); 8500 drm_modeset_backoff(&ctx); 8501 goto retry; 8502 } 8503 8504 /* 8505 * If we fail here, it means that the hardware appears to be 8506 * programmed in a way that shouldn't be possible, given our 8507 * understanding of watermark requirements. This might mean a 8508 * mistake in the hardware readout code or a mistake in the 8509 * watermark calculations for a given platform. Raise a WARN 8510 * so that this is noticeable. 8511 * 8512 * If this actually happens, we'll have to just leave the 8513 * BIOS-programmed watermarks untouched and hope for the best. 8514 */ 8515 drm_WARN(&dev_priv->drm, ret, 8516 "Could not determine valid watermarks for inherited state\n"); 8517 8518 drm_atomic_state_put(state); 8519 8520 drm_modeset_drop_locks(&ctx); 8521 drm_modeset_acquire_fini(&ctx); 8522 } 8523 8524 static int intel_initial_commit(struct drm_device *dev) 8525 { 8526 struct drm_atomic_state *state = NULL; 8527 struct drm_modeset_acquire_ctx ctx; 8528 struct intel_crtc *crtc; 8529 int ret = 0; 8530 8531 state = drm_atomic_state_alloc(dev); 8532 if (!state) 8533 return -ENOMEM; 8534 8535 drm_modeset_acquire_init(&ctx, 0); 8536 8537 retry: 8538 state->acquire_ctx = &ctx; 8539 8540 for_each_intel_crtc(dev, crtc) { 8541 struct intel_crtc_state *crtc_state = 8542 intel_atomic_get_crtc_state(state, crtc); 8543 8544 if (IS_ERR(crtc_state)) { 8545 ret = PTR_ERR(crtc_state); 8546 goto out; 8547 } 8548 8549 if (crtc_state->hw.active) { 8550 struct intel_encoder *encoder; 8551 8552 /* 8553 * We've not yet detected sink capabilities 8554 * (audio,infoframes,etc.) and thus we don't want to 8555 * force a full state recomputation yet. We want that to 8556 * happen only for the first real commit from userspace. 8557 * So preserve the inherited flag for the time being. 8558 */ 8559 crtc_state->inherited = true; 8560 8561 ret = drm_atomic_add_affected_planes(state, &crtc->base); 8562 if (ret) 8563 goto out; 8564 8565 /* 8566 * FIXME hack to force a LUT update to avoid the 8567 * plane update forcing the pipe gamma on without 8568 * having a proper LUT loaded. Remove once we 8569 * have readout for pipe gamma enable. 8570 */ 8571 crtc_state->uapi.color_mgmt_changed = true; 8572 8573 for_each_intel_encoder_mask(dev, encoder, 8574 crtc_state->uapi.encoder_mask) { 8575 if (encoder->initial_fastset_check && 8576 !encoder->initial_fastset_check(encoder, crtc_state)) { 8577 ret = drm_atomic_add_affected_connectors(state, 8578 &crtc->base); 8579 if (ret) 8580 goto out; 8581 } 8582 } 8583 } 8584 } 8585 8586 ret = drm_atomic_commit(state); 8587 8588 out: 8589 if (ret == -EDEADLK) { 8590 drm_atomic_state_clear(state); 8591 drm_modeset_backoff(&ctx); 8592 goto retry; 8593 } 8594 8595 drm_atomic_state_put(state); 8596 8597 drm_modeset_drop_locks(&ctx); 8598 drm_modeset_acquire_fini(&ctx); 8599 8600 return ret; 8601 } 8602 8603 static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = { 8604 .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, 8605 }; 8606 8607 static void intel_mode_config_init(struct drm_i915_private *i915) 8608 { 8609 struct drm_mode_config *mode_config = &i915->drm.mode_config; 8610 8611 drm_mode_config_init(&i915->drm); 8612 INIT_LIST_HEAD(&i915->global_obj_list); 8613 8614 mode_config->min_width = 0; 8615 mode_config->min_height = 0; 8616 8617 mode_config->preferred_depth = 24; 8618 mode_config->prefer_shadow = 1; 8619 8620 mode_config->funcs = &intel_mode_funcs; 8621 mode_config->helper_private = &intel_mode_config_funcs; 8622 8623 mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915); 8624 8625 /* 8626 * Maximum framebuffer dimensions, chosen to match 8627 * the maximum render engine surface size on gen4+. 8628 */ 8629 if (DISPLAY_VER(i915) >= 7) { 8630 mode_config->max_width = 16384; 8631 mode_config->max_height = 16384; 8632 } else if (DISPLAY_VER(i915) >= 4) { 8633 mode_config->max_width = 8192; 8634 mode_config->max_height = 8192; 8635 } else if (DISPLAY_VER(i915) == 3) { 8636 mode_config->max_width = 4096; 8637 mode_config->max_height = 4096; 8638 } else { 8639 mode_config->max_width = 2048; 8640 mode_config->max_height = 2048; 8641 } 8642 8643 if (IS_I845G(i915) || IS_I865G(i915)) { 8644 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512; 8645 mode_config->cursor_height = 1023; 8646 } else if (IS_I830(i915) || IS_I85X(i915) || 8647 IS_I915G(i915) || IS_I915GM(i915)) { 8648 mode_config->cursor_width = 64; 8649 mode_config->cursor_height = 64; 8650 } else { 8651 mode_config->cursor_width = 256; 8652 mode_config->cursor_height = 256; 8653 } 8654 } 8655 8656 static void intel_mode_config_cleanup(struct drm_i915_private *i915) 8657 { 8658 intel_atomic_global_obj_cleanup(i915); 8659 drm_mode_config_cleanup(&i915->drm); 8660 } 8661 8662 /* part #1: call before irq install */ 8663 int intel_modeset_init_noirq(struct drm_i915_private *i915) 8664 { 8665 int ret; 8666 8667 if (i915_inject_probe_failure(i915)) 8668 return -ENODEV; 8669 8670 if (HAS_DISPLAY(i915)) { 8671 ret = drm_vblank_init(&i915->drm, 8672 INTEL_NUM_PIPES(i915)); 8673 if (ret) 8674 return ret; 8675 } 8676 8677 intel_bios_init(i915); 8678 8679 ret = intel_vga_register(i915); 8680 if (ret) 8681 goto cleanup_bios; 8682 8683 /* FIXME: completely on the wrong abstraction layer */ 8684 intel_power_domains_init_hw(i915, false); 8685 8686 if (!HAS_DISPLAY(i915)) 8687 return 0; 8688 8689 intel_dmc_ucode_init(i915); 8690 8691 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); 8692 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | 8693 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); 8694 8695 i915->window2_delay = 0; /* No DSB so no window2 delay */ 8696 8697 intel_mode_config_init(i915); 8698 8699 ret = intel_cdclk_init(i915); 8700 if (ret) 8701 goto cleanup_vga_client_pw_domain_dmc; 8702 8703 ret = intel_dbuf_init(i915); 8704 if (ret) 8705 goto cleanup_vga_client_pw_domain_dmc; 8706 8707 ret = intel_bw_init(i915); 8708 if (ret) 8709 goto cleanup_vga_client_pw_domain_dmc; 8710 8711 init_llist_head(&i915->atomic_helper.free_list); 8712 INIT_WORK(&i915->atomic_helper.free_work, 8713 intel_atomic_helper_free_state_worker); 8714 8715 intel_init_quirks(i915); 8716 8717 intel_fbc_init(i915); 8718 8719 return 0; 8720 8721 cleanup_vga_client_pw_domain_dmc: 8722 intel_dmc_ucode_fini(i915); 8723 intel_power_domains_driver_remove(i915); 8724 intel_vga_unregister(i915); 8725 cleanup_bios: 8726 intel_bios_driver_remove(i915); 8727 8728 return ret; 8729 } 8730 8731 /* part #2: call after irq install, but before gem init */ 8732 int intel_modeset_init_nogem(struct drm_i915_private *i915) 8733 { 8734 struct drm_device *dev = &i915->drm; 8735 enum pipe pipe; 8736 struct intel_crtc *crtc; 8737 int ret; 8738 8739 if (!HAS_DISPLAY(i915)) 8740 return 0; 8741 8742 intel_init_pm(i915); 8743 8744 intel_panel_sanitize_ssc(i915); 8745 8746 intel_pps_setup(i915); 8747 8748 intel_gmbus_setup(i915); 8749 8750 drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n", 8751 INTEL_NUM_PIPES(i915), 8752 INTEL_NUM_PIPES(i915) > 1 ? "s" : ""); 8753 8754 for_each_pipe(i915, pipe) { 8755 ret = intel_crtc_init(i915, pipe); 8756 if (ret) { 8757 intel_mode_config_cleanup(i915); 8758 return ret; 8759 } 8760 } 8761 8762 intel_plane_possible_crtcs_init(i915); 8763 intel_shared_dpll_init(i915); 8764 intel_fdi_pll_freq_update(i915); 8765 8766 intel_update_czclk(i915); 8767 intel_modeset_init_hw(i915); 8768 intel_dpll_update_ref_clks(i915); 8769 8770 intel_hdcp_component_init(i915); 8771 8772 if (i915->max_cdclk_freq == 0) 8773 intel_update_max_cdclk(i915); 8774 8775 /* 8776 * If the platform has HTI, we need to find out whether it has reserved 8777 * any display resources before we create our display outputs. 8778 */ 8779 if (INTEL_INFO(i915)->display.has_hti) 8780 i915->hti_state = intel_de_read(i915, HDPORT_STATE); 8781 8782 /* Just disable it once at startup */ 8783 intel_vga_disable(i915); 8784 intel_setup_outputs(i915); 8785 8786 drm_modeset_lock_all(dev); 8787 intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx); 8788 intel_acpi_assign_connector_fwnodes(i915); 8789 drm_modeset_unlock_all(dev); 8790 8791 for_each_intel_crtc(dev, crtc) { 8792 if (!to_intel_crtc_state(crtc->base.state)->uapi.active) 8793 continue; 8794 intel_crtc_initial_plane_config(crtc); 8795 } 8796 8797 /* 8798 * Make sure hardware watermarks really match the state we read out. 8799 * Note that we need to do this after reconstructing the BIOS fb's 8800 * since the watermark calculation done here will use pstate->fb. 8801 */ 8802 if (!HAS_GMCH(i915)) 8803 sanitize_watermarks(i915); 8804 8805 return 0; 8806 } 8807 8808 /* part #3: call after gem init */ 8809 int intel_modeset_init(struct drm_i915_private *i915) 8810 { 8811 int ret; 8812 8813 if (!HAS_DISPLAY(i915)) 8814 return 0; 8815 8816 /* 8817 * Force all active planes to recompute their states. So that on 8818 * mode_setcrtc after probe, all the intel_plane_state variables 8819 * are already calculated and there is no assert_plane warnings 8820 * during bootup. 8821 */ 8822 ret = intel_initial_commit(&i915->drm); 8823 if (ret) 8824 drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret); 8825 8826 intel_overlay_setup(i915); 8827 8828 ret = intel_fbdev_init(&i915->drm); 8829 if (ret) 8830 return ret; 8831 8832 /* Only enable hotplug handling once the fbdev is fully set up. */ 8833 intel_hpd_init(i915); 8834 intel_hpd_poll_disable(i915); 8835 8836 intel_init_ipc(i915); 8837 8838 return 0; 8839 } 8840 8841 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 8842 { 8843 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); 8844 /* 640x480@60Hz, ~25175 kHz */ 8845 struct dpll clock = { 8846 .m1 = 18, 8847 .m2 = 7, 8848 .p1 = 13, 8849 .p2 = 4, 8850 .n = 2, 8851 }; 8852 u32 dpll, fp; 8853 int i; 8854 8855 drm_WARN_ON(&dev_priv->drm, 8856 i9xx_calc_dpll_params(48000, &clock) != 25154); 8857 8858 drm_dbg_kms(&dev_priv->drm, 8859 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 8860 pipe_name(pipe), clock.vco, clock.dot); 8861 8862 fp = i9xx_dpll_compute_fp(&clock); 8863 dpll = DPLL_DVO_2X_MODE | 8864 DPLL_VGA_MODE_DIS | 8865 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 8866 PLL_P2_DIVIDE_BY_4 | 8867 PLL_REF_INPUT_DREFCLK | 8868 DPLL_VCO_ENABLE; 8869 8870 intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); 8871 intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); 8872 intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); 8873 intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); 8874 intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); 8875 intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); 8876 intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); 8877 8878 intel_de_write(dev_priv, FP0(pipe), fp); 8879 intel_de_write(dev_priv, FP1(pipe), fp); 8880 8881 /* 8882 * Apparently we need to have VGA mode enabled prior to changing 8883 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 8884 * dividers, even though the register value does change. 8885 */ 8886 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); 8887 intel_de_write(dev_priv, DPLL(pipe), dpll); 8888 8889 /* Wait for the clocks to stabilize. */ 8890 intel_de_posting_read(dev_priv, DPLL(pipe)); 8891 udelay(150); 8892 8893 /* The pixel multiplier can only be updated once the 8894 * DPLL is enabled and the clocks are stable. 8895 * 8896 * So write it again. 8897 */ 8898 intel_de_write(dev_priv, DPLL(pipe), dpll); 8899 8900 /* We do this three times for luck */ 8901 for (i = 0; i < 3 ; i++) { 8902 intel_de_write(dev_priv, DPLL(pipe), dpll); 8903 intel_de_posting_read(dev_priv, DPLL(pipe)); 8904 udelay(150); /* wait for warmup */ 8905 } 8906 8907 intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE); 8908 intel_de_posting_read(dev_priv, PIPECONF(pipe)); 8909 8910 intel_wait_for_pipe_scanline_moving(crtc); 8911 } 8912 8913 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 8914 { 8915 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); 8916 8917 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n", 8918 pipe_name(pipe)); 8919 8920 drm_WARN_ON(&dev_priv->drm, 8921 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE); 8922 drm_WARN_ON(&dev_priv->drm, 8923 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE); 8924 drm_WARN_ON(&dev_priv->drm, 8925 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE); 8926 drm_WARN_ON(&dev_priv->drm, 8927 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK); 8928 drm_WARN_ON(&dev_priv->drm, 8929 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK); 8930 8931 intel_de_write(dev_priv, PIPECONF(pipe), 0); 8932 intel_de_posting_read(dev_priv, PIPECONF(pipe)); 8933 8934 intel_wait_for_pipe_scanline_stopped(crtc); 8935 8936 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); 8937 intel_de_posting_read(dev_priv, DPLL(pipe)); 8938 } 8939 8940 void intel_display_resume(struct drm_device *dev) 8941 { 8942 struct drm_i915_private *i915 = to_i915(dev); 8943 struct drm_atomic_state *state = i915->modeset_restore_state; 8944 struct drm_modeset_acquire_ctx ctx; 8945 int ret; 8946 8947 if (!HAS_DISPLAY(i915)) 8948 return; 8949 8950 i915->modeset_restore_state = NULL; 8951 if (state) 8952 state->acquire_ctx = &ctx; 8953 8954 drm_modeset_acquire_init(&ctx, 0); 8955 8956 while (1) { 8957 ret = drm_modeset_lock_all_ctx(dev, &ctx); 8958 if (ret != -EDEADLK) 8959 break; 8960 8961 drm_modeset_backoff(&ctx); 8962 } 8963 8964 if (!ret) 8965 ret = __intel_display_resume(i915, state, &ctx); 8966 8967 intel_enable_ipc(i915); 8968 drm_modeset_drop_locks(&ctx); 8969 drm_modeset_acquire_fini(&ctx); 8970 8971 if (ret) 8972 drm_err(&i915->drm, 8973 "Restoring old state failed with %i\n", ret); 8974 if (state) 8975 drm_atomic_state_put(state); 8976 } 8977 8978 static void intel_hpd_poll_fini(struct drm_i915_private *i915) 8979 { 8980 struct intel_connector *connector; 8981 struct drm_connector_list_iter conn_iter; 8982 8983 /* Kill all the work that may have been queued by hpd. */ 8984 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 8985 for_each_intel_connector_iter(connector, &conn_iter) { 8986 if (connector->modeset_retry_work.func) 8987 cancel_work_sync(&connector->modeset_retry_work); 8988 if (connector->hdcp.shim) { 8989 cancel_delayed_work_sync(&connector->hdcp.check_work); 8990 cancel_work_sync(&connector->hdcp.prop_work); 8991 } 8992 } 8993 drm_connector_list_iter_end(&conn_iter); 8994 } 8995 8996 /* part #1: call before irq uninstall */ 8997 void intel_modeset_driver_remove(struct drm_i915_private *i915) 8998 { 8999 if (!HAS_DISPLAY(i915)) 9000 return; 9001 9002 flush_workqueue(i915->flip_wq); 9003 flush_workqueue(i915->modeset_wq); 9004 9005 flush_work(&i915->atomic_helper.free_work); 9006 drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list)); 9007 } 9008 9009 /* part #2: call after irq uninstall */ 9010 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) 9011 { 9012 if (!HAS_DISPLAY(i915)) 9013 return; 9014 9015 /* 9016 * Due to the hpd irq storm handling the hotplug work can re-arm the 9017 * poll handlers. Hence disable polling after hpd handling is shut down. 9018 */ 9019 intel_hpd_poll_fini(i915); 9020 9021 /* 9022 * MST topology needs to be suspended so we don't have any calls to 9023 * fbdev after it's finalized. MST will be destroyed later as part of 9024 * drm_mode_config_cleanup() 9025 */ 9026 intel_dp_mst_suspend(i915); 9027 9028 /* poll work can call into fbdev, hence clean that up afterwards */ 9029 intel_fbdev_fini(i915); 9030 9031 intel_unregister_dsm_handler(); 9032 9033 /* flush any delayed tasks or pending work */ 9034 flush_scheduled_work(); 9035 9036 intel_hdcp_component_fini(i915); 9037 9038 intel_mode_config_cleanup(i915); 9039 9040 intel_overlay_cleanup(i915); 9041 9042 intel_gmbus_teardown(i915); 9043 9044 destroy_workqueue(i915->flip_wq); 9045 destroy_workqueue(i915->modeset_wq); 9046 9047 intel_fbc_cleanup(i915); 9048 } 9049 9050 /* part #3: call after gem init */ 9051 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915) 9052 { 9053 intel_dmc_ucode_fini(i915); 9054 9055 intel_power_domains_driver_remove(i915); 9056 9057 intel_vga_unregister(i915); 9058 9059 intel_bios_driver_remove(i915); 9060 } 9061 9062 bool intel_modeset_probe_defer(struct pci_dev *pdev) 9063 { 9064 struct drm_privacy_screen *privacy_screen; 9065 9066 /* 9067 * apple-gmux is needed on dual GPU MacBook Pro 9068 * to probe the panel if we're the inactive GPU. 9069 */ 9070 if (vga_switcheroo_client_probe_defer(pdev)) 9071 return true; 9072 9073 /* If the LCD panel has a privacy-screen, wait for it */ 9074 privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL); 9075 if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER) 9076 return true; 9077 9078 drm_privacy_screen_put(privacy_screen); 9079 9080 return false; 9081 } 9082 9083 void intel_display_driver_register(struct drm_i915_private *i915) 9084 { 9085 if (!HAS_DISPLAY(i915)) 9086 return; 9087 9088 intel_display_debugfs_register(i915); 9089 9090 /* Must be done after probing outputs */ 9091 intel_opregion_register(i915); 9092 intel_acpi_video_register(i915); 9093 9094 intel_audio_init(i915); 9095 9096 /* 9097 * Some ports require correctly set-up hpd registers for 9098 * detection to work properly (leading to ghost connected 9099 * connector status), e.g. VGA on gm45. Hence we can only set 9100 * up the initial fbdev config after hpd irqs are fully 9101 * enabled. We do it last so that the async config cannot run 9102 * before the connectors are registered. 9103 */ 9104 intel_fbdev_initial_config_async(&i915->drm); 9105 9106 /* 9107 * We need to coordinate the hotplugs with the asynchronous 9108 * fbdev configuration, for which we use the 9109 * fbdev->async_cookie. 9110 */ 9111 drm_kms_helper_poll_init(&i915->drm); 9112 } 9113 9114 void intel_display_driver_unregister(struct drm_i915_private *i915) 9115 { 9116 if (!HAS_DISPLAY(i915)) 9117 return; 9118 9119 intel_fbdev_unregister(i915); 9120 intel_audio_deinit(i915); 9121 9122 /* 9123 * After flushing the fbdev (incl. a late async config which 9124 * will have delayed queuing of a hotplug event), then flush 9125 * the hotplug events. 9126 */ 9127 drm_kms_helper_poll_fini(&i915->drm); 9128 drm_atomic_helper_shutdown(&i915->drm); 9129 9130 acpi_video_unregister(); 9131 intel_opregion_unregister(i915); 9132 } 9133 9134 bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915) 9135 { 9136 return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915); 9137 } 9138