/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_drv.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
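
/*
 * Worked example for the divider formula above (illustrative numbers, not
 * any particular SKU): with an 800 MHz (800000 kHz) HPLL reference and a
 * divider field of 7, DIV_ROUND_CLOSEST(800000 << 1, 7 + 1) = 200000 kHz,
 * i.e. a 200 MHz clock. The << 1 and + 1 exist because the hardware
 * divider is specified in half steps.
 */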

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
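
/*
 * The * 5 factors above reflect that on VLV/CHV the PLL produces a "fast
 * clock" running at five times the pixel clock (cf. vlv_calc_dpll_params()
 * returning clock->dot / 5), so the dot limits are expressed in fast clock
 * units: e.g. a 148500 kHz pixel clock is checked here as 742500 kHz.
 */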
469 */ 470 .dot = { .min = 25000 * 5, .max = 540000 * 5}, 471 .vco = { .min = 4800000, .max = 6480000 }, 472 .n = { .min = 1, .max = 1 }, 473 .m1 = { .min = 2, .max = 2 }, 474 .m2 = { .min = 24 << 22, .max = 175 << 22 }, 475 .p1 = { .min = 2, .max = 4 }, 476 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 477 }; 478 479 static const struct intel_limit intel_limits_bxt = { 480 /* FIXME: find real dot limits */ 481 .dot = { .min = 0, .max = INT_MAX }, 482 .vco = { .min = 4800000, .max = 6700000 }, 483 .n = { .min = 1, .max = 1 }, 484 .m1 = { .min = 2, .max = 2 }, 485 /* FIXME: find real m2 limits */ 486 .m2 = { .min = 2 << 22, .max = 255 << 22 }, 487 .p1 = { .min = 2, .max = 4 }, 488 .p2 = { .p2_slow = 1, .p2_fast = 20 }, 489 }; 490 491 /* WA Display #0827: Gen9:all */ 492 static void 493 skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable) 494 { 495 if (enable) 496 I915_WRITE(CLKGATE_DIS_PSL(pipe), 497 I915_READ(CLKGATE_DIS_PSL(pipe)) | 498 DUPS1_GATING_DIS | DUPS2_GATING_DIS); 499 else 500 I915_WRITE(CLKGATE_DIS_PSL(pipe), 501 I915_READ(CLKGATE_DIS_PSL(pipe)) & 502 ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS)); 503 } 504 505 /* Wa_2006604312:icl */ 506 static void 507 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 508 bool enable) 509 { 510 if (enable) 511 I915_WRITE(CLKGATE_DIS_PSL(pipe), 512 I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS); 513 else 514 I915_WRITE(CLKGATE_DIS_PSL(pipe), 515 I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS); 516 } 517 518 static bool 519 needs_modeset(const struct intel_crtc_state *state) 520 { 521 return drm_atomic_crtc_needs_modeset(&state->base); 522 } 523 524 /* 525 * Platform specific helpers to calculate the port PLL loopback- (clock.m), 526 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast 527 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic. 528 * The helpers' return value is the rate of the clock that is fed to the 529 * display engine's pipe which can be the above fast dot clock rate or a 530 * divided-down version of it. 
531 */ 532 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 533 static int pnv_calc_dpll_params(int refclk, struct dpll *clock) 534 { 535 clock->m = clock->m2 + 2; 536 clock->p = clock->p1 * clock->p2; 537 if (WARN_ON(clock->n == 0 || clock->p == 0)) 538 return 0; 539 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 540 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 541 542 return clock->dot; 543 } 544 545 static u32 i9xx_dpll_compute_m(struct dpll *dpll) 546 { 547 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 548 } 549 550 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock) 551 { 552 clock->m = i9xx_dpll_compute_m(clock); 553 clock->p = clock->p1 * clock->p2; 554 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) 555 return 0; 556 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); 557 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 558 559 return clock->dot; 560 } 561 562 static int vlv_calc_dpll_params(int refclk, struct dpll *clock) 563 { 564 clock->m = clock->m1 * clock->m2; 565 clock->p = clock->p1 * clock->p2; 566 if (WARN_ON(clock->n == 0 || clock->p == 0)) 567 return 0; 568 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 569 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 570 571 return clock->dot / 5; 572 } 573 574 int chv_calc_dpll_params(int refclk, struct dpll *clock) 575 { 576 clock->m = clock->m1 * clock->m2; 577 clock->p = clock->p1 * clock->p2; 578 if (WARN_ON(clock->n == 0 || clock->p == 0)) 579 return 0; 580 clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), 581 clock->n << 22); 582 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 583 584 return clock->dot / 5; 585 } 586 587 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 588 589 /* 590 * Returns whether the given set of divisors are valid for a given refclk with 591 * the given connectors. 592 */ 593 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv, 594 const struct intel_limit *limit, 595 const struct dpll *clock) 596 { 597 if (clock->n < limit->n.min || limit->n.max < clock->n) 598 INTELPllInvalid("n out of range\n"); 599 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 600 INTELPllInvalid("p1 out of range\n"); 601 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 602 INTELPllInvalid("m2 out of range\n"); 603 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 604 INTELPllInvalid("m1 out of range\n"); 605 606 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) && 607 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv)) 608 if (clock->m1 <= clock->m2) 609 INTELPllInvalid("m1 <= m2\n"); 610 611 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 612 !IS_GEN9_LP(dev_priv)) { 613 if (clock->p < limit->p.min || limit->p.max < clock->p) 614 INTELPllInvalid("p out of range\n"); 615 if (clock->m < limit->m.min || limit->m.max < clock->m) 616 INTELPllInvalid("m out of range\n"); 617 } 618 619 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 620 INTELPllInvalid("vco out of range\n"); 621 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 622 * connector, etc., rather than just a single range. 
623 */ 624 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 625 INTELPllInvalid("dot out of range\n"); 626 627 return true; 628 } 629 630 static int 631 i9xx_select_p2_div(const struct intel_limit *limit, 632 const struct intel_crtc_state *crtc_state, 633 int target) 634 { 635 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 636 637 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 638 /* 639 * For LVDS just rely on its current settings for dual-channel. 640 * We haven't figured out how to reliably set up different 641 * single/dual channel state, if we even can. 642 */ 643 if (intel_is_dual_link_lvds(dev_priv)) 644 return limit->p2.p2_fast; 645 else 646 return limit->p2.p2_slow; 647 } else { 648 if (target < limit->p2.dot_limit) 649 return limit->p2.p2_slow; 650 else 651 return limit->p2.p2_fast; 652 } 653 } 654 655 /* 656 * Returns a set of divisors for the desired target clock with the given 657 * refclk, or FALSE. The returned values represent the clock equation: 658 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 659 * 660 * Target and reference clocks are specified in kHz. 661 * 662 * If match_clock is provided, then best_clock P divider must match the P 663 * divider from @match_clock used for LVDS downclocking. 664 */ 665 static bool 666 i9xx_find_best_dpll(const struct intel_limit *limit, 667 struct intel_crtc_state *crtc_state, 668 int target, int refclk, struct dpll *match_clock, 669 struct dpll *best_clock) 670 { 671 struct drm_device *dev = crtc_state->base.crtc->dev; 672 struct dpll clock; 673 int err = target; 674 675 memset(best_clock, 0, sizeof(*best_clock)); 676 677 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 678 679 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 680 clock.m1++) { 681 for (clock.m2 = limit->m2.min; 682 clock.m2 <= limit->m2.max; clock.m2++) { 683 if (clock.m2 >= clock.m1) 684 break; 685 for (clock.n = limit->n.min; 686 clock.n <= limit->n.max; clock.n++) { 687 for (clock.p1 = limit->p1.min; 688 clock.p1 <= limit->p1.max; clock.p1++) { 689 int this_err; 690 691 i9xx_calc_dpll_params(refclk, &clock); 692 if (!intel_PLL_is_valid(to_i915(dev), 693 limit, 694 &clock)) 695 continue; 696 if (match_clock && 697 clock.p != match_clock->p) 698 continue; 699 700 this_err = abs(clock.dot - target); 701 if (this_err < err) { 702 *best_clock = clock; 703 err = this_err; 704 } 705 } 706 } 707 } 708 } 709 710 return (err != target); 711 } 712 713 /* 714 * Returns a set of divisors for the desired target clock with the given 715 * refclk, or FALSE. The returned values represent the clock equation: 716 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 717 * 718 * Target and reference clocks are specified in kHz. 719 * 720 * If match_clock is provided, then best_clock P divider must match the P 721 * divider from @match_clock used for LVDS downclocking. 
722 */ 723 static bool 724 pnv_find_best_dpll(const struct intel_limit *limit, 725 struct intel_crtc_state *crtc_state, 726 int target, int refclk, struct dpll *match_clock, 727 struct dpll *best_clock) 728 { 729 struct drm_device *dev = crtc_state->base.crtc->dev; 730 struct dpll clock; 731 int err = target; 732 733 memset(best_clock, 0, sizeof(*best_clock)); 734 735 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 736 737 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 738 clock.m1++) { 739 for (clock.m2 = limit->m2.min; 740 clock.m2 <= limit->m2.max; clock.m2++) { 741 for (clock.n = limit->n.min; 742 clock.n <= limit->n.max; clock.n++) { 743 for (clock.p1 = limit->p1.min; 744 clock.p1 <= limit->p1.max; clock.p1++) { 745 int this_err; 746 747 pnv_calc_dpll_params(refclk, &clock); 748 if (!intel_PLL_is_valid(to_i915(dev), 749 limit, 750 &clock)) 751 continue; 752 if (match_clock && 753 clock.p != match_clock->p) 754 continue; 755 756 this_err = abs(clock.dot - target); 757 if (this_err < err) { 758 *best_clock = clock; 759 err = this_err; 760 } 761 } 762 } 763 } 764 } 765 766 return (err != target); 767 } 768 769 /* 770 * Returns a set of divisors for the desired target clock with the given 771 * refclk, or FALSE. The returned values represent the clock equation: 772 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 773 * 774 * Target and reference clocks are specified in kHz. 775 * 776 * If match_clock is provided, then best_clock P divider must match the P 777 * divider from @match_clock used for LVDS downclocking. 778 */ 779 static bool 780 g4x_find_best_dpll(const struct intel_limit *limit, 781 struct intel_crtc_state *crtc_state, 782 int target, int refclk, struct dpll *match_clock, 783 struct dpll *best_clock) 784 { 785 struct drm_device *dev = crtc_state->base.crtc->dev; 786 struct dpll clock; 787 int max_n; 788 bool found = false; 789 /* approximately equals target * 0.00585 */ 790 int err_most = (target >> 8) + (target >> 9); 791 792 memset(best_clock, 0, sizeof(*best_clock)); 793 794 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 795 796 max_n = limit->n.max; 797 /* based on hardware requirement, prefer smaller n to precision */ 798 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 799 /* based on hardware requirement, prefere larger m1,m2 */ 800 for (clock.m1 = limit->m1.max; 801 clock.m1 >= limit->m1.min; clock.m1--) { 802 for (clock.m2 = limit->m2.max; 803 clock.m2 >= limit->m2.min; clock.m2--) { 804 for (clock.p1 = limit->p1.max; 805 clock.p1 >= limit->p1.min; clock.p1--) { 806 int this_err; 807 808 i9xx_calc_dpll_params(refclk, &clock); 809 if (!intel_PLL_is_valid(to_i915(dev), 810 limit, 811 &clock)) 812 continue; 813 814 this_err = abs(clock.dot - target); 815 if (this_err < err_most) { 816 *best_clock = clock; 817 err_most = this_err; 818 max_n = clock.n; 819 found = true; 820 } 821 } 822 } 823 } 824 } 825 return found; 826 } 827 828 /* 829 * Check if the calculated PLL configuration is more optimal compared to the 830 * best configuration and error found so far. Return the calculated error. 831 */ 832 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, 833 const struct dpll *calculated_clock, 834 const struct dpll *best_clock, 835 unsigned int best_error_ppm, 836 unsigned int *error_ppm) 837 { 838 /* 839 * For CHV ignore the error and consider only the P value. 840 * Prefer a bigger P value based on HW requirements. 
841 */ 842 if (IS_CHERRYVIEW(to_i915(dev))) { 843 *error_ppm = 0; 844 845 return calculated_clock->p > best_clock->p; 846 } 847 848 if (WARN_ON_ONCE(!target_freq)) 849 return false; 850 851 *error_ppm = div_u64(1000000ULL * 852 abs(target_freq - calculated_clock->dot), 853 target_freq); 854 /* 855 * Prefer a better P value over a better (smaller) error if the error 856 * is small. Ensure this preference for future configurations too by 857 * setting the error to 0. 858 */ 859 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { 860 *error_ppm = 0; 861 862 return true; 863 } 864 865 return *error_ppm + 10 < best_error_ppm; 866 } 867 868 /* 869 * Returns a set of divisors for the desired target clock with the given 870 * refclk, or FALSE. The returned values represent the clock equation: 871 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 872 */ 873 static bool 874 vlv_find_best_dpll(const struct intel_limit *limit, 875 struct intel_crtc_state *crtc_state, 876 int target, int refclk, struct dpll *match_clock, 877 struct dpll *best_clock) 878 { 879 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 880 struct drm_device *dev = crtc->base.dev; 881 struct dpll clock; 882 unsigned int bestppm = 1000000; 883 /* min update 19.2 MHz */ 884 int max_n = min(limit->n.max, refclk / 19200); 885 bool found = false; 886 887 target *= 5; /* fast clock */ 888 889 memset(best_clock, 0, sizeof(*best_clock)); 890 891 /* based on hardware requirement, prefer smaller n to precision */ 892 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 893 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 894 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; 895 clock.p2 -= clock.p2 > 10 ? 2 : 1) { 896 clock.p = clock.p1 * clock.p2; 897 /* based on hardware requirement, prefer bigger m1,m2 values */ 898 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 899 unsigned int ppm; 900 901 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 902 refclk * clock.m1); 903 904 vlv_calc_dpll_params(refclk, &clock); 905 906 if (!intel_PLL_is_valid(to_i915(dev), 907 limit, 908 &clock)) 909 continue; 910 911 if (!vlv_PLL_is_optimal(dev, target, 912 &clock, 913 best_clock, 914 bestppm, &ppm)) 915 continue; 916 917 *best_clock = clock; 918 bestppm = ppm; 919 found = true; 920 } 921 } 922 } 923 } 924 925 return found; 926 } 927 928 /* 929 * Returns a set of divisors for the desired target clock with the given 930 * refclk, or FALSE. The returned values represent the clock equation: 931 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 932 */ 933 static bool 934 chv_find_best_dpll(const struct intel_limit *limit, 935 struct intel_crtc_state *crtc_state, 936 int target, int refclk, struct dpll *match_clock, 937 struct dpll *best_clock) 938 { 939 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 940 struct drm_device *dev = crtc->base.dev; 941 unsigned int best_error_ppm; 942 struct dpll clock; 943 u64 m2; 944 int found = false; 945 946 memset(best_clock, 0, sizeof(*best_clock)); 947 best_error_ppm = 1000000; 948 949 /* 950 * Based on hardware doc, the n always set to 1, and m1 always 951 * set to 2. If requires to support 200Mhz refclk, we need to 952 * revisit this because n may not 1 anymore. 
953 */ 954 clock.n = 1, clock.m1 = 2; 955 target *= 5; /* fast clock */ 956 957 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 958 for (clock.p2 = limit->p2.p2_fast; 959 clock.p2 >= limit->p2.p2_slow; 960 clock.p2 -= clock.p2 > 10 ? 2 : 1) { 961 unsigned int error_ppm; 962 963 clock.p = clock.p1 * clock.p2; 964 965 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22, 966 refclk * clock.m1); 967 968 if (m2 > INT_MAX/clock.m1) 969 continue; 970 971 clock.m2 = m2; 972 973 chv_calc_dpll_params(refclk, &clock); 974 975 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock)) 976 continue; 977 978 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, 979 best_error_ppm, &error_ppm)) 980 continue; 981 982 *best_clock = clock; 983 best_error_ppm = error_ppm; 984 found = true; 985 } 986 } 987 988 return found; 989 } 990 991 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, 992 struct dpll *best_clock) 993 { 994 int refclk = 100000; 995 const struct intel_limit *limit = &intel_limits_bxt; 996 997 return chv_find_best_dpll(limit, crtc_state, 998 crtc_state->port_clock, refclk, 999 NULL, best_clock); 1000 } 1001 1002 bool intel_crtc_active(struct intel_crtc *crtc) 1003 { 1004 /* Be paranoid as we can arrive here with only partial 1005 * state retrieved from the hardware during setup. 1006 * 1007 * We can ditch the adjusted_mode.crtc_clock check as soon 1008 * as Haswell has gained clock readout/fastboot support. 1009 * 1010 * We can ditch the crtc->primary->state->fb check as soon as we can 1011 * properly reconstruct framebuffers. 1012 * 1013 * FIXME: The intel_crtc->active here should be switched to 1014 * crtc->state->active once we have proper CRTC states wired up 1015 * for atomic. 1016 */ 1017 return crtc->active && crtc->base.primary->state->fb && 1018 crtc->config->base.adjusted_mode.crtc_clock; 1019 } 1020 1021 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 1022 enum pipe pipe) 1023 { 1024 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1025 1026 return crtc->config->cpu_transcoder; 1027 } 1028 1029 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, 1030 enum pipe pipe) 1031 { 1032 i915_reg_t reg = PIPEDSL(pipe); 1033 u32 line1, line2; 1034 u32 line_mask; 1035 1036 if (IS_GEN(dev_priv, 2)) 1037 line_mask = DSL_LINEMASK_GEN2; 1038 else 1039 line_mask = DSL_LINEMASK_GEN3; 1040 1041 line1 = I915_READ(reg) & line_mask; 1042 msleep(5); 1043 line2 = I915_READ(reg) & line_mask; 1044 1045 return line1 != line2; 1046 } 1047 1048 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) 1049 { 1050 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1051 enum pipe pipe = crtc->pipe; 1052 1053 /* Wait for the display line to settle/start moving */ 1054 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) 1055 DRM_ERROR("pipe %c scanline %s wait timed out\n", 1056 pipe_name(pipe), onoff(state)); 1057 } 1058 1059 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc) 1060 { 1061 wait_for_pipe_scanline_moving(crtc, false); 1062 } 1063 1064 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) 1065 { 1066 wait_for_pipe_scanline_moving(crtc, true); 1067 } 1068 1069 static void 1070 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) 1071 { 1072 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 1073 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1074 

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}
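
/*
 * The check above samples PIPEDSL (the current scanline) twice, 5 ms apart;
 * at any scanout rate the pipe supports that is long enough for the counter
 * to advance, so two identical reads mean the pipe is stopped.
 */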

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(&dev_priv->uncore,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(&dev_priv->uncore,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
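
/*
 * Ports B and C report their ready status in the same DPLL(0) register,
 * with port C's bits 4 above port B's; that is what the expected_mask <<= 4
 * above accounts for. Port D instead has its own status register,
 * DPIO_PHY_STATUS.
 */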

static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(&dev_priv->uncore,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
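
/*
 * Unlike the per-pipe ILK path above, LPT has a single PCH transcoder that
 * is fed from FDI RX on pipe A only, hence the fixed LPT_TRANSCONF register
 * and the assert_fdi_rx_enabled(dev_priv, PIPE_A) check.
 */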

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(&dev_priv->uncore,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (INTEL_GEN(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}
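
/*
 * A zero max vblank count tells the DRM core there is no usable hardware
 * frame counter (gen2, or i965gm with the TV encoder active); in that case
 * intel_enable_pipe() below also waits for the scanline to start moving
 * before drm_crtc_vblank_on(), so that software vblank counting starts from
 * a sane point.
 */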
1826 */ 1827 if (HAS_GMCH(dev_priv)) { 1828 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) 1829 assert_dsi_pll_enabled(dev_priv); 1830 else 1831 assert_pll_enabled(dev_priv, pipe); 1832 } else { 1833 if (new_crtc_state->has_pch_encoder) { 1834 /* if driving the PCH, we need FDI enabled */ 1835 assert_fdi_rx_pll_enabled(dev_priv, 1836 intel_crtc_pch_transcoder(crtc)); 1837 assert_fdi_tx_pll_enabled(dev_priv, 1838 (enum pipe) cpu_transcoder); 1839 } 1840 /* FIXME: assert CPU port conditions for SNB+ */ 1841 } 1842 1843 trace_intel_pipe_enable(crtc); 1844 1845 reg = PIPECONF(cpu_transcoder); 1846 val = I915_READ(reg); 1847 if (val & PIPECONF_ENABLE) { 1848 /* we keep both pipes enabled on 830 */ 1849 WARN_ON(!IS_I830(dev_priv)); 1850 return; 1851 } 1852 1853 I915_WRITE(reg, val | PIPECONF_ENABLE); 1854 POSTING_READ(reg); 1855 1856 /* 1857 * Until the pipe starts PIPEDSL reads will return a stale value, 1858 * which causes an apparent vblank timestamp jump when PIPEDSL 1859 * resets to its proper value. That also messes up the frame count 1860 * when it's derived from the timestamps. So let's wait for the 1861 * pipe to start properly before we call drm_crtc_vblank_on() 1862 */ 1863 if (intel_crtc_max_vblank_count(new_crtc_state) == 0) 1864 intel_wait_for_pipe_scanline_moving(crtc); 1865 } 1866 1867 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) 1868 { 1869 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 1870 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1871 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 1872 enum pipe pipe = crtc->pipe; 1873 i915_reg_t reg; 1874 u32 val; 1875 1876 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 1877 1878 /* 1879 * Make sure planes won't keep trying to pump pixels to us, 1880 * or we might hang the display. 1881 */ 1882 assert_planes_disabled(crtc); 1883 1884 trace_intel_pipe_disable(crtc); 1885 1886 reg = PIPECONF(cpu_transcoder); 1887 val = I915_READ(reg); 1888 if ((val & PIPECONF_ENABLE) == 0) 1889 return; 1890 1891 /* 1892 * Double wide has implications for planes 1893 * so best keep it disabled when not needed. 1894 */ 1895 if (old_crtc_state->double_wide) 1896 val &= ~PIPECONF_DOUBLE_WIDE; 1897 1898 /* Don't disable pipe or pipe PLLs if needed */ 1899 if (!IS_I830(dev_priv)) 1900 val &= ~PIPECONF_ENABLE; 1901 1902 I915_WRITE(reg, val); 1903 if ((val & PIPECONF_ENABLE) == 0) 1904 intel_wait_for_pipe_off(old_crtc_state); 1905 } 1906 1907 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 1908 { 1909 return IS_GEN(dev_priv, 2) ? 
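/* 2KiB GTT tile size on gen2, 4KiB everywhere else */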
2048 : 4096; 1910 } 1911 1912 static unsigned int 1913 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) 1914 { 1915 struct drm_i915_private *dev_priv = to_i915(fb->dev); 1916 unsigned int cpp = fb->format->cpp[color_plane]; 1917 1918 switch (fb->modifier) { 1919 case DRM_FORMAT_MOD_LINEAR: 1920 return intel_tile_size(dev_priv); 1921 case I915_FORMAT_MOD_X_TILED: 1922 if (IS_GEN(dev_priv, 2)) 1923 return 128; 1924 else 1925 return 512; 1926 case I915_FORMAT_MOD_Y_TILED_CCS: 1927 if (color_plane == 1) 1928 return 128; 1929 /* fall through */ 1930 case I915_FORMAT_MOD_Y_TILED: 1931 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv)) 1932 return 128; 1933 else 1934 return 512; 1935 case I915_FORMAT_MOD_Yf_TILED_CCS: 1936 if (color_plane == 1) 1937 return 128; 1938 /* fall through */ 1939 case I915_FORMAT_MOD_Yf_TILED: 1940 switch (cpp) { 1941 case 1: 1942 return 64; 1943 case 2: 1944 case 4: 1945 return 128; 1946 case 8: 1947 case 16: 1948 return 256; 1949 default: 1950 MISSING_CASE(cpp); 1951 return cpp; 1952 } 1953 break; 1954 default: 1955 MISSING_CASE(fb->modifier); 1956 return cpp; 1957 } 1958 } 1959 1960 static unsigned int 1961 intel_tile_height(const struct drm_framebuffer *fb, int color_plane) 1962 { 1963 return intel_tile_size(to_i915(fb->dev)) / 1964 intel_tile_width_bytes(fb, color_plane); 1965 } 1966 1967 /* Return the tile dimensions in pixel units */ 1968 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, 1969 unsigned int *tile_width, 1970 unsigned int *tile_height) 1971 { 1972 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane); 1973 unsigned int cpp = fb->format->cpp[color_plane]; 1974 1975 *tile_width = tile_width_bytes / cpp; 1976 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes; 1977 } 1978 1979 unsigned int 1980 intel_fb_align_height(const struct drm_framebuffer *fb, 1981 int color_plane, unsigned int height) 1982 { 1983 unsigned int tile_height = intel_tile_height(fb, color_plane); 1984 1985 return ALIGN(height, tile_height); 1986 } 1987 1988 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 1989 { 1990 unsigned int size = 0; 1991 int i; 1992 1993 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 1994 size += rot_info->plane[i].width * rot_info->plane[i].height; 1995 1996 return size; 1997 } 1998 1999 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info) 2000 { 2001 unsigned int size = 0; 2002 int i; 2003 2004 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) 2005 size += rem_info->plane[i].width * rem_info->plane[i].height; 2006 2007 return size; 2008 } 2009 2010 static void 2011 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, 2012 const struct drm_framebuffer *fb, 2013 unsigned int rotation) 2014 { 2015 view->type = I915_GGTT_VIEW_NORMAL; 2016 if (drm_rotation_90_or_270(rotation)) { 2017 view->type = I915_GGTT_VIEW_ROTATED; 2018 view->rotated = to_intel_framebuffer(fb)->rot_info; 2019 } 2020 } 2021 2022 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv) 2023 { 2024 if (IS_I830(dev_priv)) 2025 return 16 * 1024; 2026 else if (IS_I85X(dev_priv)) 2027 return 256; 2028 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 2029 return 32; 2030 else 2031 return 4 * 1024; 2032 } 2033 2034 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2035 { 2036 if (INTEL_GEN(dev_priv) >= 9) 2037 return 256 * 1024; 2038 else if (IS_I965G(dev_priv) || 
IS_I965GM(dev_priv) || 2039 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2040 return 128 * 1024; 2041 else if (INTEL_GEN(dev_priv) >= 4) 2042 return 4 * 1024; 2043 else 2044 return 0; 2045 } 2046 2047 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, 2048 int color_plane) 2049 { 2050 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2051 2052 /* AUX_DIST needs only 4K alignment */ 2053 if (color_plane == 1) 2054 return 4096; 2055 2056 switch (fb->modifier) { 2057 case DRM_FORMAT_MOD_LINEAR: 2058 return intel_linear_alignment(dev_priv); 2059 case I915_FORMAT_MOD_X_TILED: 2060 if (INTEL_GEN(dev_priv) >= 9) 2061 return 256 * 1024; 2062 return 0; 2063 case I915_FORMAT_MOD_Y_TILED_CCS: 2064 case I915_FORMAT_MOD_Yf_TILED_CCS: 2065 case I915_FORMAT_MOD_Y_TILED: 2066 case I915_FORMAT_MOD_Yf_TILED: 2067 return 1 * 1024 * 1024; 2068 default: 2069 MISSING_CASE(fb->modifier); 2070 return 0; 2071 } 2072 } 2073 2074 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) 2075 { 2076 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2077 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2078 2079 return INTEL_GEN(dev_priv) < 4 || 2080 (plane->has_fbc && 2081 plane_state->view.type == I915_GGTT_VIEW_NORMAL); 2082 } 2083 2084 struct i915_vma * 2085 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 2086 const struct i915_ggtt_view *view, 2087 bool uses_fence, 2088 unsigned long *out_flags) 2089 { 2090 struct drm_device *dev = fb->dev; 2091 struct drm_i915_private *dev_priv = to_i915(dev); 2092 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2093 intel_wakeref_t wakeref; 2094 struct i915_vma *vma; 2095 unsigned int pinctl; 2096 u32 alignment; 2097 2098 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2099 2100 alignment = intel_surf_alignment(fb, 0); 2101 2102 /* Note that the w/a also requires 64 PTE of padding following the 2103 * bo. We currently fill all unused PTE with the shadow page and so 2104 * we should always have valid PTE following the scanout preventing 2105 * the VT-d warning. 2106 */ 2107 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) 2108 alignment = 256 * 1024; 2109 2110 /* 2111 * Global gtt pte registers are special registers which actually forward 2112 * writes to a chunk of system memory, which means that there is no risk 2113 * that the register values disappear as soon as we call 2114 * intel_runtime_pm_put(), so it is correct to wrap only the 2115 * pin/unpin/fence and not more. 2116 */ 2117 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2118 i915_gem_object_lock(obj); 2119 2120 atomic_inc(&dev_priv->gpu_error.pending_fb_pin); 2121 2122 pinctl = 0; 2123 2124 /* Valleyview is definitely limited to scanning out the first 2125 * 512MiB. Let's presume this behaviour was inherited from the 2126 * g4x display engine and that all earlier gen are similarly 2127 * limited. Testing suggests that it is a little more 2128 * complicated than this. For example, Cherryview appears quite 2129 * happy to scanout from anywhere within its global aperture. 2130 */ 2131 if (HAS_GMCH(dev_priv)) 2132 pinctl |= PIN_MAPPABLE; 2133 2134 vma = i915_gem_object_pin_to_display_plane(obj, 2135 alignment, view, pinctl); 2136 if (IS_ERR(vma)) 2137 goto err; 2138 2139 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { 2140 int ret; 2141 2142 /* Install a fence for tiled scan-out.
Pre-i965 always needs a 2143 * fence, whereas 965+ only requires a fence if using 2144 * framebuffer compression. For simplicity, we always install 2145 * a fence when possible, as the cost is not that onerous. 2146 * 2147 * If we fail to fence the tiled scanout, then either the 2148 * modeset will reject the change (which is highly unlikely as 2149 * the affected systems, all but one, do not have unmappable 2150 * space) or we will not be able to enable full powersaving 2151 * techniques (also likely not to apply due to various limits 2152 * FBC and the like impose on the size of the buffer, which 2153 * presumably we violated anyway with this unmappable buffer). 2154 * Anyway, it is presumably better to stumble onwards with 2155 * something and try to run the system in a "less than optimal" 2156 * mode that matches the user configuration. 2157 */ 2158 ret = i915_vma_pin_fence(vma); 2159 if (ret != 0 && INTEL_GEN(dev_priv) < 4) { 2160 i915_gem_object_unpin_from_display_plane(vma); 2161 vma = ERR_PTR(ret); 2162 goto err; 2163 } 2164 2165 if (ret == 0 && vma->fence) 2166 *out_flags |= PLANE_HAS_FENCE; 2167 } 2168 2169 i915_vma_get(vma); 2170 err: 2171 atomic_dec(&dev_priv->gpu_error.pending_fb_pin); 2172 2173 i915_gem_object_unlock(obj); 2174 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2175 return vma; 2176 } 2177 2178 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) 2179 { 2180 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 2181 2182 i915_gem_object_lock(vma->obj); 2183 if (flags & PLANE_HAS_FENCE) 2184 i915_vma_unpin_fence(vma); 2185 i915_gem_object_unpin_from_display_plane(vma); 2186 i915_gem_object_unlock(vma->obj); 2187 2188 i915_vma_put(vma); 2189 } 2190 2191 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane, 2192 unsigned int rotation) 2193 { 2194 if (drm_rotation_90_or_270(rotation)) 2195 return to_intel_framebuffer(fb)->rotated[color_plane].pitch; 2196 else 2197 return fb->pitches[color_plane]; 2198 } 2199 2200 /* 2201 * Convert the x/y offsets into a linear offset. 2202 * Only valid with 0/180 degree rotation, which is fine since linear 2203 * offset is only used with linear buffers on pre-hsw and tiled buffers 2204 * with gen2/3, and 90/270 degree rotation isn't supported on any of them. 2205 */ 2206 u32 intel_fb_xy_to_linear(int x, int y, 2207 const struct intel_plane_state *state, 2208 int color_plane) 2209 { 2210 const struct drm_framebuffer *fb = state->base.fb; 2211 unsigned int cpp = fb->format->cpp[color_plane]; 2212 unsigned int pitch = state->color_plane[color_plane].stride; 2213 2214 return y * pitch + x * cpp; 2215 } 2216 2217 /* 2218 * Add the x/y offsets derived from fb->offsets[] to the user 2219 * specified plane src x/y offsets. The resulting x/y offsets 2220 * specify the start of scanout from the beginning of the gtt mapping.
2221 */ 2222 void intel_add_fb_offsets(int *x, int *y, 2223 const struct intel_plane_state *state, 2224 int color_plane) 2225 2226 { 2227 *x += state->color_plane[color_plane].x; 2228 *y += state->color_plane[color_plane].y; 2229 } 2230 2231 static u32 intel_adjust_tile_offset(int *x, int *y, 2232 unsigned int tile_width, 2233 unsigned int tile_height, 2234 unsigned int tile_size, 2235 unsigned int pitch_tiles, 2236 u32 old_offset, 2237 u32 new_offset) 2238 { 2239 unsigned int pitch_pixels = pitch_tiles * tile_width; 2240 unsigned int tiles; 2241 2242 WARN_ON(old_offset & (tile_size - 1)); 2243 WARN_ON(new_offset & (tile_size - 1)); 2244 WARN_ON(new_offset > old_offset); 2245 2246 tiles = (old_offset - new_offset) / tile_size; 2247 2248 *y += tiles / pitch_tiles * tile_height; 2249 *x += tiles % pitch_tiles * tile_width; 2250 2251 /* minimize x in case it got needlessly big */ 2252 *y += *x / pitch_pixels * tile_height; 2253 *x %= pitch_pixels; 2254 2255 return new_offset; 2256 } 2257 2258 static bool is_surface_linear(u64 modifier, int color_plane) 2259 { 2260 return modifier == DRM_FORMAT_MOD_LINEAR; 2261 } 2262 2263 static u32 intel_adjust_aligned_offset(int *x, int *y, 2264 const struct drm_framebuffer *fb, 2265 int color_plane, 2266 unsigned int rotation, 2267 unsigned int pitch, 2268 u32 old_offset, u32 new_offset) 2269 { 2270 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2271 unsigned int cpp = fb->format->cpp[color_plane]; 2272 2273 WARN_ON(new_offset > old_offset); 2274 2275 if (!is_surface_linear(fb->modifier, color_plane)) { 2276 unsigned int tile_size, tile_width, tile_height; 2277 unsigned int pitch_tiles; 2278 2279 tile_size = intel_tile_size(dev_priv); 2280 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2281 2282 if (drm_rotation_90_or_270(rotation)) { 2283 pitch_tiles = pitch / tile_height; 2284 swap(tile_width, tile_height); 2285 } else { 2286 pitch_tiles = pitch / (tile_width * cpp); 2287 } 2288 2289 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2290 tile_size, pitch_tiles, 2291 old_offset, new_offset); 2292 } else { 2293 old_offset += *y * pitch + *x * cpp; 2294 2295 *y = (old_offset - new_offset) / pitch; 2296 *x = ((old_offset - new_offset) - *y * pitch) / cpp; 2297 } 2298 2299 return new_offset; 2300 } 2301 2302 /* 2303 * Adjust the tile offset by moving the difference into 2304 * the x/y offsets. 2305 */ 2306 static u32 intel_plane_adjust_aligned_offset(int *x, int *y, 2307 const struct intel_plane_state *state, 2308 int color_plane, 2309 u32 old_offset, u32 new_offset) 2310 { 2311 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane, 2312 state->base.rotation, 2313 state->color_plane[color_plane].stride, 2314 old_offset, new_offset); 2315 } 2316 2317 /* 2318 * Computes the aligned offset to the base tile and adjusts 2319 * x, y. bytes per pixel is assumed to be a power-of-two. 2320 * 2321 * In the 90/270 rotated case, x and y are assumed 2322 * to be already rotated to match the rotated GTT view, and 2323 * pitch is the tile_height aligned framebuffer height. 2324 * 2325 * This function is used when computing the derived information 2326 * under intel_framebuffer, so using any of that information 2327 * here is not allowed. Anything under drm_framebuffer can be 2328 * used. This is why the user has to pass in the pitch since it 2329 * is specified in the rotated orientation. 
2330 */ 2331 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, 2332 int *x, int *y, 2333 const struct drm_framebuffer *fb, 2334 int color_plane, 2335 unsigned int pitch, 2336 unsigned int rotation, 2337 u32 alignment) 2338 { 2339 unsigned int cpp = fb->format->cpp[color_plane]; 2340 u32 offset, offset_aligned; 2341 2342 if (alignment) 2343 alignment--; 2344 2345 if (!is_surface_linear(fb->modifier, color_plane)) { 2346 unsigned int tile_size, tile_width, tile_height; 2347 unsigned int tile_rows, tiles, pitch_tiles; 2348 2349 tile_size = intel_tile_size(dev_priv); 2350 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2351 2352 if (drm_rotation_90_or_270(rotation)) { 2353 pitch_tiles = pitch / tile_height; 2354 swap(tile_width, tile_height); 2355 } else { 2356 pitch_tiles = pitch / (tile_width * cpp); 2357 } 2358 2359 tile_rows = *y / tile_height; 2360 *y %= tile_height; 2361 2362 tiles = *x / tile_width; 2363 *x %= tile_width; 2364 2365 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2366 offset_aligned = offset & ~alignment; 2367 2368 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2369 tile_size, pitch_tiles, 2370 offset, offset_aligned); 2371 } else { 2372 offset = *y * pitch + *x * cpp; 2373 offset_aligned = offset & ~alignment; 2374 2375 *y = (offset & alignment) / pitch; 2376 *x = ((offset & alignment) - *y * pitch) / cpp; 2377 } 2378 2379 return offset_aligned; 2380 } 2381 2382 static u32 intel_plane_compute_aligned_offset(int *x, int *y, 2383 const struct intel_plane_state *state, 2384 int color_plane) 2385 { 2386 struct intel_plane *intel_plane = to_intel_plane(state->base.plane); 2387 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 2388 const struct drm_framebuffer *fb = state->base.fb; 2389 unsigned int rotation = state->base.rotation; 2390 int pitch = state->color_plane[color_plane].stride; 2391 u32 alignment; 2392 2393 if (intel_plane->id == PLANE_CURSOR) 2394 alignment = intel_cursor_alignment(dev_priv); 2395 else 2396 alignment = intel_surf_alignment(fb, color_plane); 2397 2398 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane, 2399 pitch, rotation, alignment); 2400 } 2401 2402 /* Convert the fb->offset[] into x/y offsets */ 2403 static int intel_fb_offset_to_xy(int *x, int *y, 2404 const struct drm_framebuffer *fb, 2405 int color_plane) 2406 { 2407 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2408 unsigned int height; 2409 2410 if (fb->modifier != DRM_FORMAT_MOD_LINEAR && 2411 fb->offsets[color_plane] % intel_tile_size(dev_priv)) { 2412 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n", 2413 fb->offsets[color_plane], color_plane); 2414 return -EINVAL; 2415 } 2416 2417 height = drm_framebuffer_plane_height(fb->height, fb, color_plane); 2418 height = ALIGN(height, intel_tile_height(fb, color_plane)); 2419 2420 /* Catch potential overflows early */ 2421 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]), 2422 fb->offsets[color_plane])) { 2423 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n", 2424 fb->offsets[color_plane], fb->pitches[color_plane], 2425 color_plane); 2426 return -ERANGE; 2427 } 2428 2429 *x = 0; 2430 *y = 0; 2431 2432 intel_adjust_aligned_offset(x, y, 2433 fb, color_plane, DRM_MODE_ROTATE_0, 2434 fb->pitches[color_plane], 2435 fb->offsets[color_plane], 0); 2436 2437 return 0; 2438 } 2439 2440 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) 2441 { 2442 switch (fb_modifier) { 2443 case 
I915_FORMAT_MOD_X_TILED: 2444 return I915_TILING_X; 2445 case I915_FORMAT_MOD_Y_TILED: 2446 case I915_FORMAT_MOD_Y_TILED_CCS: 2447 return I915_TILING_Y; 2448 default: 2449 return I915_TILING_NONE; 2450 } 2451 } 2452 2453 /* 2454 * From the Sky Lake PRM: 2455 * "The Color Control Surface (CCS) contains the compression status of 2456 * the cache-line pairs. The compression state of the cache-line pair 2457 * is specified by 2 bits in the CCS. Each CCS cache-line represents 2458 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled 2459 * cache-line-pairs. CCS is always Y tiled." 2460 * 2461 * Since cache line pairs refers to horizontally adjacent cache lines, 2462 * each cache line in the CCS corresponds to an area of 32x16 cache 2463 * lines on the main surface. Since each pixel is 4 bytes, this gives 2464 * us a ratio of one byte in the CCS for each 8x16 pixels in the 2465 * main surface. 2466 */ 2467 static const struct drm_format_info ccs_formats[] = { 2468 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 2469 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2470 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 2471 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2472 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 2473 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2474 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 2475 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2476 }; 2477 2478 static const struct drm_format_info * 2479 lookup_format_info(const struct drm_format_info formats[], 2480 int num_formats, u32 format) 2481 { 2482 int i; 2483 2484 for (i = 0; i < num_formats; i++) { 2485 if (formats[i].format == format) 2486 return &formats[i]; 2487 } 2488 2489 return NULL; 2490 } 2491 2492 static const struct drm_format_info * 2493 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 2494 { 2495 switch (cmd->modifier[0]) { 2496 case I915_FORMAT_MOD_Y_TILED_CCS: 2497 case I915_FORMAT_MOD_Yf_TILED_CCS: 2498 return lookup_format_info(ccs_formats, 2499 ARRAY_SIZE(ccs_formats), 2500 cmd->pixel_format); 2501 default: 2502 return NULL; 2503 } 2504 } 2505 2506 bool is_ccs_modifier(u64 modifier) 2507 { 2508 return modifier == I915_FORMAT_MOD_Y_TILED_CCS || 2509 modifier == I915_FORMAT_MOD_Yf_TILED_CCS; 2510 } 2511 2512 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, 2513 u32 pixel_format, u64 modifier) 2514 { 2515 struct intel_crtc *crtc; 2516 struct intel_plane *plane; 2517 2518 /* 2519 * We assume the primary plane for pipe A has 2520 * the highest stride limits of them all. 2521 */ 2522 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); 2523 plane = to_intel_plane(crtc->base.primary); 2524 2525 return plane->max_stride(plane, pixel_format, modifier, 2526 DRM_MODE_ROTATE_0); 2527 } 2528 2529 static 2530 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, 2531 u32 pixel_format, u64 modifier) 2532 { 2533 /* 2534 * Arbitrary limit for gen4+ chosen to match the 2535 * render engine max stride. 
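* (256KiB on gen7+, 128KiB on gen4+).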
2536 * 2537 * The new CCS hash mode makes remapping impossible 2538 */ 2539 if (!is_ccs_modifier(modifier)) { 2540 if (INTEL_GEN(dev_priv) >= 7) 2541 return 256*1024; 2542 else if (INTEL_GEN(dev_priv) >= 4) 2543 return 128*1024; 2544 } 2545 2546 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); 2547 } 2548 2549 static u32 2550 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) 2551 { 2552 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2553 2554 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { 2555 u32 max_stride = intel_plane_fb_max_stride(dev_priv, 2556 fb->format->format, 2557 fb->modifier); 2558 2559 /* 2560 * To make remapping with linear generally feasible 2561 * we need the stride to be page aligned. 2562 */ 2563 if (fb->pitches[color_plane] > max_stride) 2564 return intel_tile_size(dev_priv); 2565 else 2566 return 64; 2567 } else { 2568 return intel_tile_width_bytes(fb, color_plane); 2569 } 2570 } 2571 2572 bool intel_plane_can_remap(const struct intel_plane_state *plane_state) 2573 { 2574 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2575 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2576 const struct drm_framebuffer *fb = plane_state->base.fb; 2577 int i; 2578 2579 /* We don't want to deal with remapping with cursors */ 2580 if (plane->id == PLANE_CURSOR) 2581 return false; 2582 2583 /* 2584 * The display engine limits already match/exceed the 2585 * render engine limits, so not much point in remapping. 2586 * Would also need to deal with the fence POT alignment 2587 * and gen2 2KiB GTT tile size. 2588 */ 2589 if (INTEL_GEN(dev_priv) < 4) 2590 return false; 2591 2592 /* 2593 * The new CCS hash mode isn't compatible with remapping as 2594 * the virtual address of the pages affects the compressed data. 2595 */ 2596 if (is_ccs_modifier(fb->modifier)) 2597 return false; 2598 2599 /* Linear needs a page aligned stride for remapping */ 2600 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { 2601 unsigned int alignment = intel_tile_size(dev_priv) - 1; 2602 2603 for (i = 0; i < fb->format->num_planes; i++) { 2604 if (fb->pitches[i] & alignment) 2605 return false; 2606 } 2607 } 2608 2609 return true; 2610 } 2611 2612 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) 2613 { 2614 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2615 const struct drm_framebuffer *fb = plane_state->base.fb; 2616 unsigned int rotation = plane_state->base.rotation; 2617 u32 stride, max_stride; 2618 2619 /* 2620 * No remapping for invisible planes since we don't have 2621 * an actual source viewport to remap. 2622 */ 2623 if (!plane_state->base.visible) 2624 return false; 2625 2626 if (!intel_plane_can_remap(plane_state)) 2627 return false; 2628 2629 /* 2630 * FIXME: aux plane limits on gen9+ are 2631 * unclear in Bspec, for now no checking. 
2632 */ 2633 stride = intel_fb_pitch(fb, 0, rotation); 2634 max_stride = plane->max_stride(plane, fb->format->format, 2635 fb->modifier, rotation); 2636 2637 return stride > max_stride; 2638 } 2639 2640 static int 2641 intel_fill_fb_info(struct drm_i915_private *dev_priv, 2642 struct drm_framebuffer *fb) 2643 { 2644 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2645 struct intel_rotation_info *rot_info = &intel_fb->rot_info; 2646 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2647 u32 gtt_offset_rotated = 0; 2648 unsigned int max_size = 0; 2649 int i, num_planes = fb->format->num_planes; 2650 unsigned int tile_size = intel_tile_size(dev_priv); 2651 2652 for (i = 0; i < num_planes; i++) { 2653 unsigned int width, height; 2654 unsigned int cpp, size; 2655 u32 offset; 2656 int x, y; 2657 int ret; 2658 2659 cpp = fb->format->cpp[i]; 2660 width = drm_framebuffer_plane_width(fb->width, fb, i); 2661 height = drm_framebuffer_plane_height(fb->height, fb, i); 2662 2663 ret = intel_fb_offset_to_xy(&x, &y, fb, i); 2664 if (ret) { 2665 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2666 i, fb->offsets[i]); 2667 return ret; 2668 } 2669 2670 if (is_ccs_modifier(fb->modifier) && i == 1) { 2671 int hsub = fb->format->hsub; 2672 int vsub = fb->format->vsub; 2673 int tile_width, tile_height; 2674 int main_x, main_y; 2675 int ccs_x, ccs_y; 2676 2677 intel_tile_dims(fb, i, &tile_width, &tile_height); 2678 tile_width *= hsub; 2679 tile_height *= vsub; 2680 2681 ccs_x = (x * hsub) % tile_width; 2682 ccs_y = (y * vsub) % tile_height; 2683 main_x = intel_fb->normal[0].x % tile_width; 2684 main_y = intel_fb->normal[0].y % tile_height; 2685 2686 /* 2687 * CCS doesn't have its own x/y offset register, so the intra CCS tile 2688 * x/y offsets must match between CCS and the main surface. 2689 */ 2690 if (main_x != ccs_x || main_y != ccs_y) { 2691 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", 2692 main_x, main_y, 2693 ccs_x, ccs_y, 2694 intel_fb->normal[0].x, 2695 intel_fb->normal[0].y, 2696 x, y); 2697 return -EINVAL; 2698 } 2699 } 2700 2701 /* 2702 * The fence (if used) is aligned to the start of the object 2703 * so having the framebuffer wrap around across the edge of the 2704 * fenced region doesn't really work. We have no API to configure 2705 * the fence start offset within the object (nor could we probably 2706 * on gen2/3). So it's just easier if we just require that the 2707 * fb layout agrees with the fence layout. We already check that the 2708 * fb stride matches the fence stride elsewhere. 2709 */ 2710 if (i == 0 && i915_gem_object_is_tiled(obj) && 2711 (x + width) * cpp > fb->pitches[i]) { 2712 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2713 i, fb->offsets[i]); 2714 return -EINVAL; 2715 } 2716 2717 /* 2718 * First pixel of the framebuffer from 2719 * the start of the normal gtt mapping. 
2720 */ 2721 intel_fb->normal[i].x = x; 2722 intel_fb->normal[i].y = y; 2723 2724 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i, 2725 fb->pitches[i], 2726 DRM_MODE_ROTATE_0, 2727 tile_size); 2728 offset /= tile_size; 2729 2730 if (!is_surface_linear(fb->modifier, i)) { 2731 unsigned int tile_width, tile_height; 2732 unsigned int pitch_tiles; 2733 struct drm_rect r; 2734 2735 intel_tile_dims(fb, i, &tile_width, &tile_height); 2736 2737 rot_info->plane[i].offset = offset; 2738 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); 2739 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 2740 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 2741 2742 intel_fb->rotated[i].pitch = 2743 rot_info->plane[i].height * tile_height; 2744 2745 /* how many tiles does this plane need */ 2746 size = rot_info->plane[i].stride * rot_info->plane[i].height; 2747 /* 2748 * If the plane isn't horizontally tile aligned, 2749 * we need one more tile. 2750 */ 2751 if (x != 0) 2752 size++; 2753 2754 /* rotate the x/y offsets to match the GTT view */ 2755 r.x1 = x; 2756 r.y1 = y; 2757 r.x2 = x + width; 2758 r.y2 = y + height; 2759 drm_rect_rotate(&r, 2760 rot_info->plane[i].width * tile_width, 2761 rot_info->plane[i].height * tile_height, 2762 DRM_MODE_ROTATE_270); 2763 x = r.x1; 2764 y = r.y1; 2765 2766 /* rotate the tile dimensions to match the GTT view */ 2767 pitch_tiles = intel_fb->rotated[i].pitch / tile_height; 2768 swap(tile_width, tile_height); 2769 2770 /* 2771 * We only keep the x/y offsets, so push all of the 2772 * gtt offset into the x/y offsets. 2773 */ 2774 intel_adjust_tile_offset(&x, &y, 2775 tile_width, tile_height, 2776 tile_size, pitch_tiles, 2777 gtt_offset_rotated * tile_size, 0); 2778 2779 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; 2780 2781 /* 2782 * First pixel of the framebuffer from 2783 * the start of the rotated gtt mapping. 2784 */ 2785 intel_fb->rotated[i].x = x; 2786 intel_fb->rotated[i].y = y; 2787 } else { 2788 size = DIV_ROUND_UP((y + height) * fb->pitches[i] + 2789 x * cpp, tile_size); 2790 } 2791 2792 /* how many tiles in total needed in the bo */ 2793 max_size = max(max_size, offset + size); 2794 } 2795 2796 if (mul_u32_u32(max_size, tile_size) > obj->base.size) { 2797 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n", 2798 mul_u32_u32(max_size, tile_size), obj->base.size); 2799 return -EINVAL; 2800 } 2801 2802 return 0; 2803 } 2804 2805 static void 2806 intel_plane_remap_gtt(struct intel_plane_state *plane_state) 2807 { 2808 struct drm_i915_private *dev_priv = 2809 to_i915(plane_state->base.plane->dev); 2810 struct drm_framebuffer *fb = plane_state->base.fb; 2811 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2812 struct intel_rotation_info *info = &plane_state->view.rotated; 2813 unsigned int rotation = plane_state->base.rotation; 2814 int i, num_planes = fb->format->num_planes; 2815 unsigned int tile_size = intel_tile_size(dev_priv); 2816 unsigned int src_x, src_y; 2817 unsigned int src_w, src_h; 2818 u32 gtt_offset = 0; 2819 2820 memset(&plane_state->view, 0, sizeof(plane_state->view)); 2821 plane_state->view.type = drm_rotation_90_or_270(rotation) ? 
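/* 90/270 needs the rotated GTT view, anything else gets a remapped view to work around the stride limits */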
2822 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED; 2823 2824 src_x = plane_state->base.src.x1 >> 16; 2825 src_y = plane_state->base.src.y1 >> 16; 2826 src_w = drm_rect_width(&plane_state->base.src) >> 16; 2827 src_h = drm_rect_height(&plane_state->base.src) >> 16; 2828 2829 WARN_ON(is_ccs_modifier(fb->modifier)); 2830 2831 /* Make src coordinates relative to the viewport */ 2832 drm_rect_translate(&plane_state->base.src, 2833 -(src_x << 16), -(src_y << 16)); 2834 2835 /* Rotate src coordinates to match rotated GTT view */ 2836 if (drm_rotation_90_or_270(rotation)) 2837 drm_rect_rotate(&plane_state->base.src, 2838 src_w << 16, src_h << 16, 2839 DRM_MODE_ROTATE_270); 2840 2841 for (i = 0; i < num_planes; i++) { 2842 unsigned int hsub = i ? fb->format->hsub : 1; 2843 unsigned int vsub = i ? fb->format->vsub : 1; 2844 unsigned int cpp = fb->format->cpp[i]; 2845 unsigned int tile_width, tile_height; 2846 unsigned int width, height; 2847 unsigned int pitch_tiles; 2848 unsigned int x, y; 2849 u32 offset; 2850 2851 intel_tile_dims(fb, i, &tile_width, &tile_height); 2852 2853 x = src_x / hsub; 2854 y = src_y / vsub; 2855 width = src_w / hsub; 2856 height = src_h / vsub; 2857 2858 /* 2859 * First pixel of the src viewport from the 2860 * start of the normal gtt mapping. 2861 */ 2862 x += intel_fb->normal[i].x; 2863 y += intel_fb->normal[i].y; 2864 2865 offset = intel_compute_aligned_offset(dev_priv, &x, &y, 2866 fb, i, fb->pitches[i], 2867 DRM_MODE_ROTATE_0, tile_size); 2868 offset /= tile_size; 2869 2870 info->plane[i].offset = offset; 2871 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], 2872 tile_width * cpp); 2873 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 2874 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 2875 2876 if (drm_rotation_90_or_270(rotation)) { 2877 struct drm_rect r; 2878 2879 /* rotate the x/y offsets to match the GTT view */ 2880 r.x1 = x; 2881 r.y1 = y; 2882 r.x2 = x + width; 2883 r.y2 = y + height; 2884 drm_rect_rotate(&r, 2885 info->plane[i].width * tile_width, 2886 info->plane[i].height * tile_height, 2887 DRM_MODE_ROTATE_270); 2888 x = r.x1; 2889 y = r.y1; 2890 2891 pitch_tiles = info->plane[i].height; 2892 plane_state->color_plane[i].stride = pitch_tiles * tile_height; 2893 2894 /* rotate the tile dimensions to match the GTT view */ 2895 swap(tile_width, tile_height); 2896 } else { 2897 pitch_tiles = info->plane[i].width; 2898 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp; 2899 } 2900 2901 /* 2902 * We only keep the x/y offsets, so push all of the 2903 * gtt offset into the x/y offsets. 
2904 */ 2905 intel_adjust_tile_offset(&x, &y, 2906 tile_width, tile_height, 2907 tile_size, pitch_tiles, 2908 gtt_offset * tile_size, 0); 2909 2910 gtt_offset += info->plane[i].width * info->plane[i].height; 2911 2912 plane_state->color_plane[i].offset = 0; 2913 plane_state->color_plane[i].x = x; 2914 plane_state->color_plane[i].y = y; 2915 } 2916 } 2917 2918 static int 2919 intel_plane_compute_gtt(struct intel_plane_state *plane_state) 2920 { 2921 const struct intel_framebuffer *fb = 2922 to_intel_framebuffer(plane_state->base.fb); 2923 unsigned int rotation = plane_state->base.rotation; 2924 int i, num_planes; 2925 2926 if (!fb) 2927 return 0; 2928 2929 num_planes = fb->base.format->num_planes; 2930 2931 if (intel_plane_needs_remap(plane_state)) { 2932 intel_plane_remap_gtt(plane_state); 2933 2934 /* 2935 * Sometimes even remapping can't overcome 2936 * the stride limitations :( Can happen with 2937 * big plane sizes and suitably misaligned 2938 * offsets. 2939 */ 2940 return intel_plane_check_stride(plane_state); 2941 } 2942 2943 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation); 2944 2945 for (i = 0; i < num_planes; i++) { 2946 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation); 2947 plane_state->color_plane[i].offset = 0; 2948 2949 if (drm_rotation_90_or_270(rotation)) { 2950 plane_state->color_plane[i].x = fb->rotated[i].x; 2951 plane_state->color_plane[i].y = fb->rotated[i].y; 2952 } else { 2953 plane_state->color_plane[i].x = fb->normal[i].x; 2954 plane_state->color_plane[i].y = fb->normal[i].y; 2955 } 2956 } 2957 2958 /* Rotate src coordinates to match rotated GTT view */ 2959 if (drm_rotation_90_or_270(rotation)) 2960 drm_rect_rotate(&plane_state->base.src, 2961 fb->base.width << 16, fb->base.height << 16, 2962 DRM_MODE_ROTATE_270); 2963 2964 return intel_plane_check_stride(plane_state); 2965 } 2966 2967 static int i9xx_format_to_fourcc(int format) 2968 { 2969 switch (format) { 2970 case DISPPLANE_8BPP: 2971 return DRM_FORMAT_C8; 2972 case DISPPLANE_BGRX555: 2973 return DRM_FORMAT_XRGB1555; 2974 case DISPPLANE_BGRX565: 2975 return DRM_FORMAT_RGB565; 2976 default: 2977 case DISPPLANE_BGRX888: 2978 return DRM_FORMAT_XRGB8888; 2979 case DISPPLANE_RGBX888: 2980 return DRM_FORMAT_XBGR8888; 2981 case DISPPLANE_BGRX101010: 2982 return DRM_FORMAT_XRGB2101010; 2983 case DISPPLANE_RGBX101010: 2984 return DRM_FORMAT_XBGR2101010; 2985 } 2986 } 2987 2988 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 2989 { 2990 switch (format) { 2991 case PLANE_CTL_FORMAT_RGB_565: 2992 return DRM_FORMAT_RGB565; 2993 case PLANE_CTL_FORMAT_NV12: 2994 return DRM_FORMAT_NV12; 2995 case PLANE_CTL_FORMAT_P010: 2996 return DRM_FORMAT_P010; 2997 case PLANE_CTL_FORMAT_P012: 2998 return DRM_FORMAT_P012; 2999 case PLANE_CTL_FORMAT_P016: 3000 return DRM_FORMAT_P016; 3001 case PLANE_CTL_FORMAT_Y210: 3002 return DRM_FORMAT_Y210; 3003 case PLANE_CTL_FORMAT_Y212: 3004 return DRM_FORMAT_Y212; 3005 case PLANE_CTL_FORMAT_Y216: 3006 return DRM_FORMAT_Y216; 3007 case PLANE_CTL_FORMAT_Y410: 3008 return DRM_FORMAT_XVYU2101010; 3009 case PLANE_CTL_FORMAT_Y412: 3010 return DRM_FORMAT_XVYU12_16161616; 3011 case PLANE_CTL_FORMAT_Y416: 3012 return DRM_FORMAT_XVYU16161616; 3013 default: 3014 case PLANE_CTL_FORMAT_XRGB_8888: 3015 if (rgb_order) { 3016 if (alpha) 3017 return DRM_FORMAT_ABGR8888; 3018 else 3019 return DRM_FORMAT_XBGR8888; 3020 } else { 3021 if (alpha) 3022 return DRM_FORMAT_ARGB8888; 3023 else 3024 return DRM_FORMAT_XRGB8888; 3025 } 3026 case 
PLANE_CTL_FORMAT_XRGB_2101010: 3027 if (rgb_order) 3028 return DRM_FORMAT_XBGR2101010; 3029 else 3030 return DRM_FORMAT_XRGB2101010; 3031 case PLANE_CTL_FORMAT_XRGB_16161616F: 3032 if (rgb_order) { 3033 if (alpha) 3034 return DRM_FORMAT_ABGR16161616F; 3035 else 3036 return DRM_FORMAT_XBGR16161616F; 3037 } else { 3038 if (alpha) 3039 return DRM_FORMAT_ARGB16161616F; 3040 else 3041 return DRM_FORMAT_XRGB16161616F; 3042 } 3043 } 3044 } 3045 3046 static bool 3047 intel_alloc_initial_plane_obj(struct intel_crtc *crtc, 3048 struct intel_initial_plane_config *plane_config) 3049 { 3050 struct drm_device *dev = crtc->base.dev; 3051 struct drm_i915_private *dev_priv = to_i915(dev); 3052 struct drm_i915_gem_object *obj = NULL; 3053 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 3054 struct drm_framebuffer *fb = &plane_config->fb->base; 3055 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); 3056 u32 size_aligned = round_up(plane_config->base + plane_config->size, 3057 PAGE_SIZE); 3058 3059 size_aligned -= base_aligned; 3060 3061 if (plane_config->size == 0) 3062 return false; 3063 3064 /* If the FB is too big, just don't use it since fbdev is not very 3065 * important and we should probably use that space with FBC or other 3066 * features. */ 3067 if (size_aligned * 2 > dev_priv->stolen_usable_size) 3068 return false; 3069 3070 switch (fb->modifier) { 3071 case DRM_FORMAT_MOD_LINEAR: 3072 case I915_FORMAT_MOD_X_TILED: 3073 case I915_FORMAT_MOD_Y_TILED: 3074 break; 3075 default: 3076 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n", 3077 fb->modifier); 3078 return false; 3079 } 3080 3081 mutex_lock(&dev->struct_mutex); 3082 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, 3083 base_aligned, 3084 base_aligned, 3085 size_aligned); 3086 mutex_unlock(&dev->struct_mutex); 3087 if (!obj) 3088 return false; 3089 3090 switch (plane_config->tiling) { 3091 case I915_TILING_NONE: 3092 break; 3093 case I915_TILING_X: 3094 case I915_TILING_Y: 3095 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling; 3096 break; 3097 default: 3098 MISSING_CASE(plane_config->tiling); 3099 return false; 3100 } 3101 3102 mode_cmd.pixel_format = fb->format->format; 3103 mode_cmd.width = fb->width; 3104 mode_cmd.height = fb->height; 3105 mode_cmd.pitches[0] = fb->pitches[0]; 3106 mode_cmd.modifier[0] = fb->modifier; 3107 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 3108 3109 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) { 3110 DRM_DEBUG_KMS("intel fb init failed\n"); 3111 goto out_unref_obj; 3112 } 3113 3114 3115 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); 3116 return true; 3117 3118 out_unref_obj: 3119 i915_gem_object_put(obj); 3120 return false; 3121 } 3122 3123 static void 3124 intel_set_plane_visible(struct intel_crtc_state *crtc_state, 3125 struct intel_plane_state *plane_state, 3126 bool visible) 3127 { 3128 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 3129 3130 plane_state->base.visible = visible; 3131 3132 if (visible) 3133 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); 3134 else 3135 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); 3136 } 3137 3138 static void fixup_active_planes(struct intel_crtc_state *crtc_state) 3139 { 3140 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 3141 struct drm_plane *plane; 3142 3143 /* 3144 * Active_planes aliases if multiple "primary" or cursor planes 3145 * have been used on the same (or wrong) pipe. 
plane_mask uses 3146 * unique ids, hence we can use that to reconstruct active_planes. 3147 */ 3148 crtc_state->active_planes = 0; 3149 3150 drm_for_each_plane_mask(plane, &dev_priv->drm, 3151 crtc_state->base.plane_mask) 3152 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); 3153 } 3154 3155 static void intel_plane_disable_noatomic(struct intel_crtc *crtc, 3156 struct intel_plane *plane) 3157 { 3158 struct intel_crtc_state *crtc_state = 3159 to_intel_crtc_state(crtc->base.state); 3160 struct intel_plane_state *plane_state = 3161 to_intel_plane_state(plane->base.state); 3162 3163 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", 3164 plane->base.base.id, plane->base.name, 3165 crtc->base.base.id, crtc->base.name); 3166 3167 intel_set_plane_visible(crtc_state, plane_state, false); 3168 fixup_active_planes(crtc_state); 3169 crtc_state->data_rate[plane->id] = 0; 3170 3171 if (plane->id == PLANE_PRIMARY) 3172 intel_pre_disable_primary_noatomic(&crtc->base); 3173 3174 intel_disable_plane(plane, crtc_state); 3175 } 3176 3177 static void 3178 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 3179 struct intel_initial_plane_config *plane_config) 3180 { 3181 struct drm_device *dev = intel_crtc->base.dev; 3182 struct drm_i915_private *dev_priv = to_i915(dev); 3183 struct drm_crtc *c; 3184 struct drm_i915_gem_object *obj; 3185 struct drm_plane *primary = intel_crtc->base.primary; 3186 struct drm_plane_state *plane_state = primary->state; 3187 struct intel_plane *intel_plane = to_intel_plane(primary); 3188 struct intel_plane_state *intel_state = 3189 to_intel_plane_state(plane_state); 3190 struct drm_framebuffer *fb; 3191 3192 if (!plane_config->fb) 3193 return; 3194 3195 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { 3196 fb = &plane_config->fb->base; 3197 goto valid_fb; 3198 } 3199 3200 kfree(plane_config->fb); 3201 3202 /* 3203 * Failed to alloc the obj, check to see if we should share 3204 * an fb with another CRTC instead 3205 */ 3206 for_each_crtc(dev, c) { 3207 struct intel_plane_state *state; 3208 3209 if (c == &intel_crtc->base) 3210 continue; 3211 3212 if (!to_intel_crtc(c)->active) 3213 continue; 3214 3215 state = to_intel_plane_state(c->primary->state); 3216 if (!state->vma) 3217 continue; 3218 3219 if (intel_plane_ggtt_offset(state) == plane_config->base) { 3220 fb = state->base.fb; 3221 drm_framebuffer_get(fb); 3222 goto valid_fb; 3223 } 3224 } 3225 3226 /* 3227 * We've failed to reconstruct the BIOS FB. Current display state 3228 * indicates that the primary plane is visible, but has a NULL FB, 3229 * which will lead to problems later if we don't fix it up. The 3230 * simplest solution is to just disable the primary plane now and 3231 * pretend the BIOS never had it enabled. 
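Disabling it here also keeps active_planes and the per-plane data_rate bookkeeping consistent with the hardware state.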
3232 */ 3233 intel_plane_disable_noatomic(intel_crtc, intel_plane); 3234 3235 return; 3236 3237 valid_fb: 3238 intel_state->base.rotation = plane_config->rotation; 3239 intel_fill_fb_ggtt_view(&intel_state->view, fb, 3240 intel_state->base.rotation); 3241 intel_state->color_plane[0].stride = 3242 intel_fb_pitch(fb, 0, intel_state->base.rotation); 3243 3244 mutex_lock(&dev->struct_mutex); 3245 intel_state->vma = 3246 intel_pin_and_fence_fb_obj(fb, 3247 &intel_state->view, 3248 intel_plane_uses_fence(intel_state), 3249 &intel_state->flags); 3250 mutex_unlock(&dev->struct_mutex); 3251 if (IS_ERR(intel_state->vma)) { 3252 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", 3253 intel_crtc->pipe, PTR_ERR(intel_state->vma)); 3254 3255 intel_state->vma = NULL; 3256 drm_framebuffer_put(fb); 3257 return; 3258 } 3259 3260 obj = intel_fb_obj(fb); 3261 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); 3262 3263 plane_state->src_x = 0; 3264 plane_state->src_y = 0; 3265 plane_state->src_w = fb->width << 16; 3266 plane_state->src_h = fb->height << 16; 3267 3268 plane_state->crtc_x = 0; 3269 plane_state->crtc_y = 0; 3270 plane_state->crtc_w = fb->width; 3271 plane_state->crtc_h = fb->height; 3272 3273 intel_state->base.src = drm_plane_state_src(plane_state); 3274 intel_state->base.dst = drm_plane_state_dest(plane_state); 3275 3276 if (i915_gem_object_is_tiled(obj)) 3277 dev_priv->preserve_bios_swizzle = true; 3278 3279 plane_state->fb = fb; 3280 plane_state->crtc = &intel_crtc->base; 3281 3282 atomic_or(to_intel_plane(primary)->frontbuffer_bit, 3283 &obj->frontbuffer_bits); 3284 } 3285 3286 static int skl_max_plane_width(const struct drm_framebuffer *fb, 3287 int color_plane, 3288 unsigned int rotation) 3289 { 3290 int cpp = fb->format->cpp[color_plane]; 3291 3292 switch (fb->modifier) { 3293 case DRM_FORMAT_MOD_LINEAR: 3294 case I915_FORMAT_MOD_X_TILED: 3295 return 4096; 3296 case I915_FORMAT_MOD_Y_TILED_CCS: 3297 case I915_FORMAT_MOD_Yf_TILED_CCS: 3298 /* FIXME AUX plane? */ 3299 case I915_FORMAT_MOD_Y_TILED: 3300 case I915_FORMAT_MOD_Yf_TILED: 3301 if (cpp == 8) 3302 return 2048; 3303 else 3304 return 4096; 3305 default: 3306 MISSING_CASE(fb->modifier); 3307 return 2048; 3308 } 3309 } 3310 3311 static int glk_max_plane_width(const struct drm_framebuffer *fb, 3312 int color_plane, 3313 unsigned int rotation) 3314 { 3315 int cpp = fb->format->cpp[color_plane]; 3316 3317 switch (fb->modifier) { 3318 case DRM_FORMAT_MOD_LINEAR: 3319 case I915_FORMAT_MOD_X_TILED: 3320 if (cpp == 8) 3321 return 4096; 3322 else 3323 return 5120; 3324 case I915_FORMAT_MOD_Y_TILED_CCS: 3325 case I915_FORMAT_MOD_Yf_TILED_CCS: 3326 /* FIXME AUX plane? 
*/ 3327 case I915_FORMAT_MOD_Y_TILED: 3328 case I915_FORMAT_MOD_Yf_TILED: 3329 if (cpp == 8) 3330 return 2048; 3331 else 3332 return 5120; 3333 default: 3334 MISSING_CASE(fb->modifier); 3335 return 2048; 3336 } 3337 } 3338 3339 static int icl_max_plane_width(const struct drm_framebuffer *fb, 3340 int color_plane, 3341 unsigned int rotation) 3342 { 3343 return 5120; 3344 } 3345 3346 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, 3347 int main_x, int main_y, u32 main_offset) 3348 { 3349 const struct drm_framebuffer *fb = plane_state->base.fb; 3350 int hsub = fb->format->hsub; 3351 int vsub = fb->format->vsub; 3352 int aux_x = plane_state->color_plane[1].x; 3353 int aux_y = plane_state->color_plane[1].y; 3354 u32 aux_offset = plane_state->color_plane[1].offset; 3355 u32 alignment = intel_surf_alignment(fb, 1); 3356 3357 while (aux_offset >= main_offset && aux_y <= main_y) { 3358 int x, y; 3359 3360 if (aux_x == main_x && aux_y == main_y) 3361 break; 3362 3363 if (aux_offset == 0) 3364 break; 3365 3366 x = aux_x / hsub; 3367 y = aux_y / vsub; 3368 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1, 3369 aux_offset, aux_offset - alignment); 3370 aux_x = x * hsub + aux_x % hsub; 3371 aux_y = y * vsub + aux_y % vsub; 3372 } 3373 3374 if (aux_x != main_x || aux_y != main_y) 3375 return false; 3376 3377 plane_state->color_plane[1].offset = aux_offset; 3378 plane_state->color_plane[1].x = aux_x; 3379 plane_state->color_plane[1].y = aux_y; 3380 3381 return true; 3382 } 3383 3384 static int skl_check_main_surface(struct intel_plane_state *plane_state) 3385 { 3386 struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); 3387 const struct drm_framebuffer *fb = plane_state->base.fb; 3388 unsigned int rotation = plane_state->base.rotation; 3389 int x = plane_state->base.src.x1 >> 16; 3390 int y = plane_state->base.src.y1 >> 16; 3391 int w = drm_rect_width(&plane_state->base.src) >> 16; 3392 int h = drm_rect_height(&plane_state->base.src) >> 16; 3393 int max_width; 3394 int max_height = 4096; 3395 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset; 3396 3397 if (INTEL_GEN(dev_priv) >= 11) 3398 max_width = icl_max_plane_width(fb, 0, rotation); 3399 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 3400 max_width = glk_max_plane_width(fb, 0, rotation); 3401 else 3402 max_width = skl_max_plane_width(fb, 0, rotation); 3403 3404 if (w > max_width || h > max_height) { 3405 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", 3406 w, h, max_width, max_height); 3407 return -EINVAL; 3408 } 3409 3410 intel_add_fb_offsets(&x, &y, plane_state, 0); 3411 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0); 3412 alignment = intel_surf_alignment(fb, 0); 3413 3414 /* 3415 * AUX surface offset is specified as the distance from the 3416 * main surface offset, and it must be non-negative. Make 3417 * sure that is what we will get. 3418 */ 3419 if (offset > aux_offset) 3420 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3421 offset, aux_offset & ~(alignment - 1)); 3422 3423 /* 3424 * When using an X-tiled surface, the plane blows up 3425 * if the x offset + width exceed the stride. 
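* The loop below steps the surface offset back one alignment step
* at a time until the x offset + width fit within the stride.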
3426 * 3427 * TODO: linear and Y-tiled seem fine, Yf untested, 3428 */ 3429 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 3430 int cpp = fb->format->cpp[0]; 3431 3432 while ((x + w) * cpp > plane_state->color_plane[0].stride) { 3433 if (offset == 0) { 3434 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n"); 3435 return -EINVAL; 3436 } 3437 3438 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3439 offset, offset - alignment); 3440 } 3441 } 3442 3443 /* 3444 * CCS AUX surface doesn't have its own x/y offsets, we must make sure 3445 * they match with the main surface x/y offsets. 3446 */ 3447 if (is_ccs_modifier(fb->modifier)) { 3448 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) { 3449 if (offset == 0) 3450 break; 3451 3452 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3453 offset, offset - alignment); 3454 } 3455 3456 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) { 3457 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); 3458 return -EINVAL; 3459 } 3460 } 3461 3462 plane_state->color_plane[0].offset = offset; 3463 plane_state->color_plane[0].x = x; 3464 plane_state->color_plane[0].y = y; 3465 3466 /* 3467 * Put the final coordinates back so that the src 3468 * coordinate checks will see the right values. 3469 */ 3470 drm_rect_translate(&plane_state->base.src, 3471 (x << 16) - plane_state->base.src.x1, 3472 (y << 16) - plane_state->base.src.y1); 3473 3474 return 0; 3475 } 3476 3477 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 3478 { 3479 const struct drm_framebuffer *fb = plane_state->base.fb; 3480 unsigned int rotation = plane_state->base.rotation; 3481 int max_width = skl_max_plane_width(fb, 1, rotation); 3482 int max_height = 4096; 3483 int x = plane_state->base.src.x1 >> 17; 3484 int y = plane_state->base.src.y1 >> 17; 3485 int w = drm_rect_width(&plane_state->base.src) >> 17; 3486 int h = drm_rect_height(&plane_state->base.src) >> 17; 3487 u32 offset; 3488 3489 intel_add_fb_offsets(&x, &y, plane_state, 1); 3490 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); 3491 3492 /* FIXME not quite sure how/if these apply to the chroma plane */ 3493 if (w > max_width || h > max_height) { 3494 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", 3495 w, h, max_width, max_height); 3496 return -EINVAL; 3497 } 3498 3499 plane_state->color_plane[1].offset = offset; 3500 plane_state->color_plane[1].x = x; 3501 plane_state->color_plane[1].y = y; 3502 3503 return 0; 3504 } 3505 3506 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) 3507 { 3508 const struct drm_framebuffer *fb = plane_state->base.fb; 3509 int src_x = plane_state->base.src.x1 >> 16; 3510 int src_y = plane_state->base.src.y1 >> 16; 3511 int hsub = fb->format->hsub; 3512 int vsub = fb->format->vsub; 3513 int x = src_x / hsub; 3514 int y = src_y / vsub; 3515 u32 offset; 3516 3517 intel_add_fb_offsets(&x, &y, plane_state, 1); 3518 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); 3519 3520 plane_state->color_plane[1].offset = offset; 3521 plane_state->color_plane[1].x = x * hsub + src_x % hsub; 3522 plane_state->color_plane[1].y = y * vsub + src_y % vsub; 3523 3524 return 0; 3525 } 3526 3527 int skl_check_plane_surface(struct intel_plane_state *plane_state) 3528 { 3529 const struct drm_framebuffer *fb = plane_state->base.fb; 3530 int ret; 3531 3532 ret = intel_plane_compute_gtt(plane_state); 
3533 if (ret) 3534 return ret; 3535 3536 if (!plane_state->base.visible) 3537 return 0; 3538 3539 /* 3540 * Handle the AUX surface first since 3541 * the main surface setup depends on it. 3542 */ 3543 if (is_planar_yuv_format(fb->format->format)) { 3544 ret = skl_check_nv12_aux_surface(plane_state); 3545 if (ret) 3546 return ret; 3547 } else if (is_ccs_modifier(fb->modifier)) { 3548 ret = skl_check_ccs_aux_surface(plane_state); 3549 if (ret) 3550 return ret; 3551 } else { 3552 plane_state->color_plane[1].offset = ~0xfff; 3553 plane_state->color_plane[1].x = 0; 3554 plane_state->color_plane[1].y = 0; 3555 } 3556 3557 ret = skl_check_main_surface(plane_state); 3558 if (ret) 3559 return ret; 3560 3561 return 0; 3562 } 3563 3564 unsigned int 3565 i9xx_plane_max_stride(struct intel_plane *plane, 3566 u32 pixel_format, u64 modifier, 3567 unsigned int rotation) 3568 { 3569 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3570 3571 if (!HAS_GMCH(dev_priv)) { 3572 return 32*1024; 3573 } else if (INTEL_GEN(dev_priv) >= 4) { 3574 if (modifier == I915_FORMAT_MOD_X_TILED) 3575 return 16*1024; 3576 else 3577 return 32*1024; 3578 } else if (INTEL_GEN(dev_priv) >= 3) { 3579 if (modifier == I915_FORMAT_MOD_X_TILED) 3580 return 8*1024; 3581 else 3582 return 16*1024; 3583 } else { 3584 if (plane->i9xx_plane == PLANE_C) 3585 return 4*1024; 3586 else 3587 return 8*1024; 3588 } 3589 } 3590 3591 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 3592 { 3593 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 3594 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3595 u32 dspcntr = 0; 3596 3597 if (crtc_state->gamma_enable) 3598 dspcntr |= DISPPLANE_GAMMA_ENABLE; 3599 3600 if (crtc_state->csc_enable) 3601 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 3602 3603 if (INTEL_GEN(dev_priv) < 5) 3604 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 3605 3606 return dspcntr; 3607 } 3608 3609 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 3610 const struct intel_plane_state *plane_state) 3611 { 3612 struct drm_i915_private *dev_priv = 3613 to_i915(plane_state->base.plane->dev); 3614 const struct drm_framebuffer *fb = plane_state->base.fb; 3615 unsigned int rotation = plane_state->base.rotation; 3616 u32 dspcntr; 3617 3618 dspcntr = DISPLAY_PLANE_ENABLE; 3619 3620 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) || 3621 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 3622 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 3623 3624 switch (fb->format->format) { 3625 case DRM_FORMAT_C8: 3626 dspcntr |= DISPPLANE_8BPP; 3627 break; 3628 case DRM_FORMAT_XRGB1555: 3629 dspcntr |= DISPPLANE_BGRX555; 3630 break; 3631 case DRM_FORMAT_RGB565: 3632 dspcntr |= DISPPLANE_BGRX565; 3633 break; 3634 case DRM_FORMAT_XRGB8888: 3635 dspcntr |= DISPPLANE_BGRX888; 3636 break; 3637 case DRM_FORMAT_XBGR8888: 3638 dspcntr |= DISPPLANE_RGBX888; 3639 break; 3640 case DRM_FORMAT_XRGB2101010: 3641 dspcntr |= DISPPLANE_BGRX101010; 3642 break; 3643 case DRM_FORMAT_XBGR2101010: 3644 dspcntr |= DISPPLANE_RGBX101010; 3645 break; 3646 default: 3647 MISSING_CASE(fb->format->format); 3648 return 0; 3649 } 3650 3651 if (INTEL_GEN(dev_priv) >= 4 && 3652 fb->modifier == I915_FORMAT_MOD_X_TILED) 3653 dspcntr |= DISPPLANE_TILED; 3654 3655 if (rotation & DRM_MODE_ROTATE_180) 3656 dspcntr |= DISPPLANE_ROTATE_180; 3657 3658 if (rotation & DRM_MODE_REFLECT_X) 3659 dspcntr |= DISPPLANE_MIRROR; 3660 3661 return dspcntr; 3662 } 3663 3664 int i9xx_check_plane_surface(struct intel_plane_state *plane_state) 3665 { 3666 
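/* Compute the final surface offset and source x/y for the plane registers. */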
struct drm_i915_private *dev_priv = 3667 to_i915(plane_state->base.plane->dev); 3668 int src_x, src_y; 3669 u32 offset; 3670 int ret; 3671 3672 ret = intel_plane_compute_gtt(plane_state); 3673 if (ret) 3674 return ret; 3675 3676 if (!plane_state->base.visible) 3677 return 0; 3678 3679 src_x = plane_state->base.src.x1 >> 16; 3680 src_y = plane_state->base.src.y1 >> 16; 3681 3682 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 3683 3684 if (INTEL_GEN(dev_priv) >= 4) 3685 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 3686 plane_state, 0); 3687 else 3688 offset = 0; 3689 3690 /* 3691 * Put the final coordinates back so that the src 3692 * coordinate checks will see the right values. 3693 */ 3694 drm_rect_translate(&plane_state->base.src, 3695 (src_x << 16) - plane_state->base.src.x1, 3696 (src_y << 16) - plane_state->base.src.y1); 3697 3698 /* HSW/BDW do this automagically in hardware */ 3699 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 3700 unsigned int rotation = plane_state->base.rotation; 3701 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3702 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3703 3704 if (rotation & DRM_MODE_ROTATE_180) { 3705 src_x += src_w - 1; 3706 src_y += src_h - 1; 3707 } else if (rotation & DRM_MODE_REFLECT_X) { 3708 src_x += src_w - 1; 3709 } 3710 } 3711 3712 plane_state->color_plane[0].offset = offset; 3713 plane_state->color_plane[0].x = src_x; 3714 plane_state->color_plane[0].y = src_y; 3715 3716 return 0; 3717 } 3718 3719 static bool i9xx_plane_has_windowing(struct intel_plane *plane) 3720 { 3721 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3722 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3723 3724 if (IS_CHERRYVIEW(dev_priv)) 3725 return i9xx_plane == PLANE_B; 3726 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 3727 return false; 3728 else if (IS_GEN(dev_priv, 4)) 3729 return i9xx_plane == PLANE_C; 3730 else 3731 return i9xx_plane == PLANE_B || 3732 i9xx_plane == PLANE_C; 3733 } 3734 3735 static int 3736 i9xx_plane_check(struct intel_crtc_state *crtc_state, 3737 struct intel_plane_state *plane_state) 3738 { 3739 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 3740 int ret; 3741 3742 ret = chv_plane_check_rotation(plane_state); 3743 if (ret) 3744 return ret; 3745 3746 ret = drm_atomic_helper_check_plane_state(&plane_state->base, 3747 &crtc_state->base, 3748 DRM_PLANE_HELPER_NO_SCALING, 3749 DRM_PLANE_HELPER_NO_SCALING, 3750 i9xx_plane_has_windowing(plane), 3751 true); 3752 if (ret) 3753 return ret; 3754 3755 ret = i9xx_check_plane_surface(plane_state); 3756 if (ret) 3757 return ret; 3758 3759 if (!plane_state->base.visible) 3760 return 0; 3761 3762 ret = intel_plane_check_src_coordinates(plane_state); 3763 if (ret) 3764 return ret; 3765 3766 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); 3767 3768 return 0; 3769 } 3770 3771 static void i9xx_update_plane(struct intel_plane *plane, 3772 const struct intel_crtc_state *crtc_state, 3773 const struct intel_plane_state *plane_state) 3774 { 3775 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3776 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3777 u32 linear_offset; 3778 int x = plane_state->color_plane[0].x; 3779 int y = plane_state->color_plane[0].y; 3780 int crtc_x = plane_state->base.dst.x1; 3781 int crtc_y = plane_state->base.dst.y1; 3782 int crtc_w = drm_rect_width(&plane_state->base.dst); 3783 int crtc_h = drm_rect_height(&plane_state->base.dst); 3784 unsigned long irqflags; 
3785 u32 dspaddr_offset; 3786 u32 dspcntr; 3787 3788 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); 3789 3790 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 3791 3792 if (INTEL_GEN(dev_priv) >= 4) 3793 dspaddr_offset = plane_state->color_plane[0].offset; 3794 else 3795 dspaddr_offset = linear_offset; 3796 3797 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3798 3799 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); 3800 3801 if (INTEL_GEN(dev_priv) < 4) { 3802 /* 3803 * PLANE_A doesn't actually have a full window 3804 * generator but let's assume we still need to 3805 * program whatever is there. 3806 */ 3807 I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 3808 I915_WRITE_FW(DSPSIZE(i9xx_plane), 3809 ((crtc_h - 1) << 16) | (crtc_w - 1)); 3810 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { 3811 I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 3812 I915_WRITE_FW(PRIMSIZE(i9xx_plane), 3813 ((crtc_h - 1) << 16) | (crtc_w - 1)); 3814 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); 3815 } 3816 3817 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3818 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x); 3819 } else if (INTEL_GEN(dev_priv) >= 4) { 3820 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset); 3821 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x); 3822 } 3823 3824 /* 3825 * The control register self-arms if the plane was previously 3826 * disabled. Try to make the plane enable atomic by writing 3827 * the control register just before the surface register. 3828 */ 3829 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 3830 if (INTEL_GEN(dev_priv) >= 4) 3831 I915_WRITE_FW(DSPSURF(i9xx_plane), 3832 intel_plane_ggtt_offset(plane_state) + 3833 dspaddr_offset); 3834 else 3835 I915_WRITE_FW(DSPADDR(i9xx_plane), 3836 intel_plane_ggtt_offset(plane_state) + 3837 dspaddr_offset); 3838 3839 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3840 } 3841 3842 static void i9xx_disable_plane(struct intel_plane *plane, 3843 const struct intel_crtc_state *crtc_state) 3844 { 3845 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3846 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3847 unsigned long irqflags; 3848 u32 dspcntr; 3849 3850 /* 3851 * DSPCNTR pipe gamma enable on g4x+ and pipe csc 3852 * enable on ilk+ affect the pipe bottom color as 3853 * well, so we must configure them even if the plane 3854 * is disabled. 3855 * 3856 * On pre-g4x there is no way to gamma correct the 3857 * pipe bottom color but we'll keep on doing this 3858 * anyway so that the crtc state readout works correctly. 3859 */ 3860 dspcntr = i9xx_plane_ctl_crtc(crtc_state); 3861 3862 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3863 3864 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 3865 if (INTEL_GEN(dev_priv) >= 4) 3866 I915_WRITE_FW(DSPSURF(i9xx_plane), 0); 3867 else 3868 I915_WRITE_FW(DSPADDR(i9xx_plane), 0); 3869 3870 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3871 } 3872 3873 static bool i9xx_plane_get_hw_state(struct intel_plane *plane, 3874 enum pipe *pipe) 3875 { 3876 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3877 enum intel_display_power_domain power_domain; 3878 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3879 intel_wakeref_t wakeref; 3880 bool ret; 3881 u32 val; 3882 3883 /* 3884 * Not 100% correct for planes that can move between pipes, 3885 * but that's only the case for gen2-4 which don't have any 3886 * display power wells. 
3887 */ 3888 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 3889 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 3890 if (!wakeref) 3891 return false; 3892 3893 val = I915_READ(DSPCNTR(i9xx_plane)); 3894 3895 ret = val & DISPLAY_PLANE_ENABLE; 3896 3897 if (INTEL_GEN(dev_priv) >= 5) 3898 *pipe = plane->pipe; 3899 else 3900 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 3901 DISPPLANE_SEL_PIPE_SHIFT; 3902 3903 intel_display_power_put(dev_priv, power_domain, wakeref); 3904 3905 return ret; 3906 } 3907 3908 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 3909 { 3910 struct drm_device *dev = intel_crtc->base.dev; 3911 struct drm_i915_private *dev_priv = to_i915(dev); 3912 3913 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 3914 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 3915 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 3916 } 3917 3918 /* 3919 * This function detaches (aka. unbinds) unused scalers in hardware 3920 */ 3921 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) 3922 { 3923 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 3924 const struct intel_crtc_scaler_state *scaler_state = 3925 &crtc_state->scaler_state; 3926 int i; 3927 3928 /* loop through and disable scalers that aren't in use */ 3929 for (i = 0; i < intel_crtc->num_scalers; i++) { 3930 if (!scaler_state->scalers[i].in_use) 3931 skl_detach_scaler(intel_crtc, i); 3932 } 3933 } 3934 3935 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, 3936 int color_plane, unsigned int rotation) 3937 { 3938 /* 3939 * The stride is expressed either in chunks of 64 bytes for 3940 * linear buffers or in number of tiles for tiled buffers. 3941 */ 3942 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) 3943 return 64; 3944 else if (drm_rotation_90_or_270(rotation)) 3945 return intel_tile_height(fb, color_plane); 3946 else 3947 return intel_tile_width_bytes(fb, color_plane); 3948 } 3949 3950 u32 skl_plane_stride(const struct intel_plane_state *plane_state, 3951 int color_plane) 3952 { 3953 const struct drm_framebuffer *fb = plane_state->base.fb; 3954 unsigned int rotation = plane_state->base.rotation; 3955 u32 stride = plane_state->color_plane[color_plane].stride; 3956 3957 if (color_plane >= fb->format->num_planes) 3958 return 0; 3959 3960 return stride / skl_plane_stride_mult(fb, color_plane, rotation); 3961 } 3962 3963 static u32 skl_plane_ctl_format(u32 pixel_format) 3964 { 3965 switch (pixel_format) { 3966 case DRM_FORMAT_C8: 3967 return PLANE_CTL_FORMAT_INDEXED; 3968 case DRM_FORMAT_RGB565: 3969 return PLANE_CTL_FORMAT_RGB_565; 3970 case DRM_FORMAT_XBGR8888: 3971 case DRM_FORMAT_ABGR8888: 3972 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 3973 case DRM_FORMAT_XRGB8888: 3974 case DRM_FORMAT_ARGB8888: 3975 return PLANE_CTL_FORMAT_XRGB_8888; 3976 case DRM_FORMAT_XBGR2101010: 3977 return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; 3978 case DRM_FORMAT_XRGB2101010: 3979 return PLANE_CTL_FORMAT_XRGB_2101010; 3980 case DRM_FORMAT_XBGR16161616F: 3981 case DRM_FORMAT_ABGR16161616F: 3982 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; 3983 case DRM_FORMAT_XRGB16161616F: 3984 case DRM_FORMAT_ARGB16161616F: 3985 return PLANE_CTL_FORMAT_XRGB_16161616F; 3986 case DRM_FORMAT_YUYV: 3987 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 3988 case DRM_FORMAT_YVYU: 3989 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 3990 case DRM_FORMAT_UYVY: 3991 return PLANE_CTL_FORMAT_YUV422 |
PLANE_CTL_YUV422_UYVY; 3992 case DRM_FORMAT_VYUY: 3993 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 3994 case DRM_FORMAT_NV12: 3995 return PLANE_CTL_FORMAT_NV12; 3996 case DRM_FORMAT_P010: 3997 return PLANE_CTL_FORMAT_P010; 3998 case DRM_FORMAT_P012: 3999 return PLANE_CTL_FORMAT_P012; 4000 case DRM_FORMAT_P016: 4001 return PLANE_CTL_FORMAT_P016; 4002 case DRM_FORMAT_Y210: 4003 return PLANE_CTL_FORMAT_Y210; 4004 case DRM_FORMAT_Y212: 4005 return PLANE_CTL_FORMAT_Y212; 4006 case DRM_FORMAT_Y216: 4007 return PLANE_CTL_FORMAT_Y216; 4008 case DRM_FORMAT_XVYU2101010: 4009 return PLANE_CTL_FORMAT_Y410; 4010 case DRM_FORMAT_XVYU12_16161616: 4011 return PLANE_CTL_FORMAT_Y412; 4012 case DRM_FORMAT_XVYU16161616: 4013 return PLANE_CTL_FORMAT_Y416; 4014 default: 4015 MISSING_CASE(pixel_format); 4016 } 4017 4018 return 0; 4019 } 4020 4021 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) 4022 { 4023 if (!plane_state->base.fb->format->has_alpha) 4024 return PLANE_CTL_ALPHA_DISABLE; 4025 4026 switch (plane_state->base.pixel_blend_mode) { 4027 case DRM_MODE_BLEND_PIXEL_NONE: 4028 return PLANE_CTL_ALPHA_DISABLE; 4029 case DRM_MODE_BLEND_PREMULTI: 4030 return PLANE_CTL_ALPHA_SW_PREMULTIPLY; 4031 case DRM_MODE_BLEND_COVERAGE: 4032 return PLANE_CTL_ALPHA_HW_PREMULTIPLY; 4033 default: 4034 MISSING_CASE(plane_state->base.pixel_blend_mode); 4035 return PLANE_CTL_ALPHA_DISABLE; 4036 } 4037 } 4038 4039 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) 4040 { 4041 if (!plane_state->base.fb->format->has_alpha) 4042 return PLANE_COLOR_ALPHA_DISABLE; 4043 4044 switch (plane_state->base.pixel_blend_mode) { 4045 case DRM_MODE_BLEND_PIXEL_NONE: 4046 return PLANE_COLOR_ALPHA_DISABLE; 4047 case DRM_MODE_BLEND_PREMULTI: 4048 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; 4049 case DRM_MODE_BLEND_COVERAGE: 4050 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY; 4051 default: 4052 MISSING_CASE(plane_state->base.pixel_blend_mode); 4053 return PLANE_COLOR_ALPHA_DISABLE; 4054 } 4055 } 4056 4057 static u32 skl_plane_ctl_tiling(u64 fb_modifier) 4058 { 4059 switch (fb_modifier) { 4060 case DRM_FORMAT_MOD_LINEAR: 4061 break; 4062 case I915_FORMAT_MOD_X_TILED: 4063 return PLANE_CTL_TILED_X; 4064 case I915_FORMAT_MOD_Y_TILED: 4065 return PLANE_CTL_TILED_Y; 4066 case I915_FORMAT_MOD_Y_TILED_CCS: 4067 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; 4068 case I915_FORMAT_MOD_Yf_TILED: 4069 return PLANE_CTL_TILED_YF; 4070 case I915_FORMAT_MOD_Yf_TILED_CCS: 4071 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; 4072 default: 4073 MISSING_CASE(fb_modifier); 4074 } 4075 4076 return 0; 4077 } 4078 4079 static u32 skl_plane_ctl_rotate(unsigned int rotate) 4080 { 4081 switch (rotate) { 4082 case DRM_MODE_ROTATE_0: 4083 break; 4084 /* 4085 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr 4086 * while i915 HW rotation is clockwise; that's why the two are swapped here.
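 * For example, a DRM_MODE_ROTATE_90 (CCW) request is programmed as
 * PLANE_CTL_ROTATE_270 (CW) and DRM_MODE_ROTATE_270 as PLANE_CTL_ROTATE_90,
 * while DRM_MODE_ROTATE_180 maps to itself.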
4087 */ 4088 case DRM_MODE_ROTATE_90: 4089 return PLANE_CTL_ROTATE_270; 4090 case DRM_MODE_ROTATE_180: 4091 return PLANE_CTL_ROTATE_180; 4092 case DRM_MODE_ROTATE_270: 4093 return PLANE_CTL_ROTATE_90; 4094 default: 4095 MISSING_CASE(rotate); 4096 } 4097 4098 return 0; 4099 } 4100 4101 static u32 cnl_plane_ctl_flip(unsigned int reflect) 4102 { 4103 switch (reflect) { 4104 case 0: 4105 break; 4106 case DRM_MODE_REFLECT_X: 4107 return PLANE_CTL_FLIP_HORIZONTAL; 4108 case DRM_MODE_REFLECT_Y: 4109 default: 4110 MISSING_CASE(reflect); 4111 } 4112 4113 return 0; 4114 } 4115 4116 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 4117 { 4118 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 4119 u32 plane_ctl = 0; 4120 4121 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 4122 return plane_ctl; 4123 4124 if (crtc_state->gamma_enable) 4125 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; 4126 4127 if (crtc_state->csc_enable) 4128 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; 4129 4130 return plane_ctl; 4131 } 4132 4133 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, 4134 const struct intel_plane_state *plane_state) 4135 { 4136 struct drm_i915_private *dev_priv = 4137 to_i915(plane_state->base.plane->dev); 4138 const struct drm_framebuffer *fb = plane_state->base.fb; 4139 unsigned int rotation = plane_state->base.rotation; 4140 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 4141 u32 plane_ctl; 4142 4143 plane_ctl = PLANE_CTL_ENABLE; 4144 4145 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { 4146 plane_ctl |= skl_plane_ctl_alpha(plane_state); 4147 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; 4148 4149 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) 4150 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709; 4151 4152 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4153 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE; 4154 } 4155 4156 plane_ctl |= skl_plane_ctl_format(fb->format->format); 4157 plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 4158 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK); 4159 4160 if (INTEL_GEN(dev_priv) >= 10) 4161 plane_ctl |= cnl_plane_ctl_flip(rotation & 4162 DRM_MODE_REFLECT_MASK); 4163 4164 if (key->flags & I915_SET_COLORKEY_DESTINATION) 4165 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; 4166 else if (key->flags & I915_SET_COLORKEY_SOURCE) 4167 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 4168 4169 return plane_ctl; 4170 } 4171 4172 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) 4173 { 4174 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 4175 u32 plane_color_ctl = 0; 4176 4177 if (INTEL_GEN(dev_priv) >= 11) 4178 return plane_color_ctl; 4179 4180 if (crtc_state->gamma_enable) 4181 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; 4182 4183 if (crtc_state->csc_enable) 4184 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; 4185 4186 return plane_color_ctl; 4187 } 4188 4189 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, 4190 const struct intel_plane_state *plane_state) 4191 { 4192 struct drm_i915_private *dev_priv = 4193 to_i915(plane_state->base.plane->dev); 4194 const struct drm_framebuffer *fb = plane_state->base.fb; 4195 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 4196 u32 plane_color_ctl = 0; 4197 4198 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; 4199 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); 4200 4201 if (fb->format->is_yuv && 
!icl_is_hdr_plane(dev_priv, plane->id)) { 4202 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) 4203 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; 4204 else 4205 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709; 4206 4207 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4208 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 4209 } else if (fb->format->is_yuv) { 4210 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; 4211 } 4212 4213 return plane_color_ctl; 4214 } 4215 4216 static int 4217 __intel_display_resume(struct drm_device *dev, 4218 struct drm_atomic_state *state, 4219 struct drm_modeset_acquire_ctx *ctx) 4220 { 4221 struct drm_crtc_state *crtc_state; 4222 struct drm_crtc *crtc; 4223 int i, ret; 4224 4225 intel_modeset_setup_hw_state(dev, ctx); 4226 i915_redisable_vga(to_i915(dev)); 4227 4228 if (!state) 4229 return 0; 4230 4231 /* 4232 * We've duplicated the state, pointers to the old state are invalid. 4233 * 4234 * Don't attempt to use the old state until we commit the duplicated state. 4235 */ 4236 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 4237 /* 4238 * Force recalculation even if we restore 4239 * current state. With fast modeset this may not result 4240 * in a modeset when the state is compatible. 4241 */ 4242 crtc_state->mode_changed = true; 4243 } 4244 4245 /* ignore any reset values/BIOS leftovers in the WM registers */ 4246 if (!HAS_GMCH(to_i915(dev))) 4247 to_intel_atomic_state(state)->skip_intermediate_wm = true; 4248 4249 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 4250 4251 WARN_ON(ret == -EDEADLK); 4252 return ret; 4253 } 4254 4255 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 4256 { 4257 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && 4258 intel_has_gpu_reset(dev_priv)); 4259 } 4260 4261 void intel_prepare_reset(struct drm_i915_private *dev_priv) 4262 { 4263 struct drm_device *dev = &dev_priv->drm; 4264 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4265 struct drm_atomic_state *state; 4266 int ret; 4267 4268 /* reset doesn't touch the display */ 4269 if (!i915_modparams.force_reset_modeset_test && 4270 !gpu_reset_clobbers_display(dev_priv)) 4271 return; 4272 4273 /* We have a modeset vs reset deadlock, defensively unbreak it. */ 4274 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4275 smp_mb__after_atomic(); 4276 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); 4277 4278 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { 4279 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n"); 4280 intel_gt_set_wedged(&dev_priv->gt); 4281 } 4282 4283 /* 4284 * Need mode_config.mutex so that we don't 4285 * trample ongoing ->detect() and whatnot. 4286 */ 4287 mutex_lock(&dev->mode_config.mutex); 4288 drm_modeset_acquire_init(ctx, 0); 4289 while (1) { 4290 ret = drm_modeset_lock_all_ctx(dev, ctx); 4291 if (ret != -EDEADLK) 4292 break; 4293 4294 drm_modeset_backoff(ctx); 4295 } 4296 /* 4297 * Disabling the crtcs gracefully seems nicer. Also the 4298 * g33 docs say we should at least disable all the planes. 
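 * The duplicated state is stashed in modeset_restore_state below and
 * committed again by intel_finish_reset() once the reset has completed.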
4299 */ 4300 state = drm_atomic_helper_duplicate_state(dev, ctx); 4301 if (IS_ERR(state)) { 4302 ret = PTR_ERR(state); 4303 DRM_ERROR("Duplicating state failed with %i\n", ret); 4304 return; 4305 } 4306 4307 ret = drm_atomic_helper_disable_all(dev, ctx); 4308 if (ret) { 4309 DRM_ERROR("Suspending CRTCs failed with %i\n", ret); 4310 drm_atomic_state_put(state); 4311 return; 4312 } 4313 4314 dev_priv->modeset_restore_state = state; 4315 state->acquire_ctx = ctx; 4316 } 4317 4318 void intel_finish_reset(struct drm_i915_private *dev_priv) 4319 { 4320 struct drm_device *dev = &dev_priv->drm; 4321 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4322 struct drm_atomic_state *state; 4323 int ret; 4324 4325 /* reset doesn't touch the display */ 4326 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 4327 return; 4328 4329 state = fetch_and_zero(&dev_priv->modeset_restore_state); 4330 if (!state) 4331 goto unlock; 4332 4333 /* reset doesn't touch the display */ 4334 if (!gpu_reset_clobbers_display(dev_priv)) { 4335 /* for testing only restore the display */ 4336 ret = __intel_display_resume(dev, state, ctx); 4337 if (ret) 4338 DRM_ERROR("Restoring old state failed with %i\n", ret); 4339 } else { 4340 /* 4341 * The display has been reset as well, 4342 * so need a full re-initialization. 4343 */ 4344 intel_pps_unlock_regs_wa(dev_priv); 4345 intel_modeset_init_hw(dev); 4346 intel_init_clock_gating(dev_priv); 4347 4348 spin_lock_irq(&dev_priv->irq_lock); 4349 if (dev_priv->display.hpd_irq_setup) 4350 dev_priv->display.hpd_irq_setup(dev_priv); 4351 spin_unlock_irq(&dev_priv->irq_lock); 4352 4353 ret = __intel_display_resume(dev, state, ctx); 4354 if (ret) 4355 DRM_ERROR("Restoring old state failed with %i\n", ret); 4356 4357 intel_hpd_init(dev_priv); 4358 } 4359 4360 drm_atomic_state_put(state); 4361 unlock: 4362 drm_modeset_drop_locks(ctx); 4363 drm_modeset_acquire_fini(ctx); 4364 mutex_unlock(&dev->mode_config.mutex); 4365 4366 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4367 } 4368 4369 static void icl_set_pipe_chicken(struct intel_crtc *crtc) 4370 { 4371 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4372 enum pipe pipe = crtc->pipe; 4373 u32 tmp; 4374 4375 tmp = I915_READ(PIPE_CHICKEN(pipe)); 4376 4377 /* 4378 * Display WA #1153: icl 4379 * enable hardware to bypass the alpha math 4380 * and rounding for per-pixel values 00 and 0xff 4381 */ 4382 tmp |= PER_PIXEL_ALPHA_BYPASS_EN; 4383 /* 4384 * Display WA #1605353570: icl 4385 * Set the pixel rounding bit to 1 to allow 4386 * passthrough of frame buffer pixels unmodified 4387 * across the pipe 4388 */ 4389 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; 4390 I915_WRITE(PIPE_CHICKEN(pipe), tmp); 4391 } 4392 4393 static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state, 4394 const struct intel_crtc_state *new_crtc_state) 4395 { 4396 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 4397 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4398 4399 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ 4400 crtc->base.mode = new_crtc_state->base.mode; 4401 4402 /* 4403 * Update pipe size and adjust fitter if needed: the reason for this is 4404 * that in compute_mode_changes we check the native mode (not the pfit 4405 * mode) to see if we can flip rather than do a full mode set.
In the 4406 * fastboot case, we'll flip, but if we don't update the pipesrc and 4407 * pfit state, we'll end up with a big fb scanned out into the wrong 4408 * sized surface. 4409 */ 4410 4411 I915_WRITE(PIPESRC(crtc->pipe), 4412 ((new_crtc_state->pipe_src_w - 1) << 16) | 4413 (new_crtc_state->pipe_src_h - 1)); 4414 4415 /* on skylake this is done by detaching scalers */ 4416 if (INTEL_GEN(dev_priv) >= 9) { 4417 skl_detach_scalers(new_crtc_state); 4418 4419 if (new_crtc_state->pch_pfit.enabled) 4420 skylake_pfit_enable(new_crtc_state); 4421 } else if (HAS_PCH_SPLIT(dev_priv)) { 4422 if (new_crtc_state->pch_pfit.enabled) 4423 ironlake_pfit_enable(new_crtc_state); 4424 else if (old_crtc_state->pch_pfit.enabled) 4425 ironlake_pfit_disable(old_crtc_state); 4426 } 4427 4428 if (INTEL_GEN(dev_priv) >= 11) 4429 icl_set_pipe_chicken(crtc); 4430 } 4431 4432 static void intel_fdi_normal_train(struct intel_crtc *crtc) 4433 { 4434 struct drm_device *dev = crtc->base.dev; 4435 struct drm_i915_private *dev_priv = to_i915(dev); 4436 int pipe = crtc->pipe; 4437 i915_reg_t reg; 4438 u32 temp; 4439 4440 /* enable normal train */ 4441 reg = FDI_TX_CTL(pipe); 4442 temp = I915_READ(reg); 4443 if (IS_IVYBRIDGE(dev_priv)) { 4444 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4445 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 4446 } else { 4447 temp &= ~FDI_LINK_TRAIN_NONE; 4448 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 4449 } 4450 I915_WRITE(reg, temp); 4451 4452 reg = FDI_RX_CTL(pipe); 4453 temp = I915_READ(reg); 4454 if (HAS_PCH_CPT(dev_priv)) { 4455 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4456 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 4457 } else { 4458 temp &= ~FDI_LINK_TRAIN_NONE; 4459 temp |= FDI_LINK_TRAIN_NONE; 4460 } 4461 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 4462 4463 /* wait one idle pattern time */ 4464 POSTING_READ(reg); 4465 udelay(1000); 4466 4467 /* IVB wants error correction enabled */ 4468 if (IS_IVYBRIDGE(dev_priv)) 4469 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 4470 FDI_FE_ERRC_ENABLE); 4471 } 4472 4473 /* The FDI link training functions for ILK/Ibexpeak. 
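 * The sequence, sketched from the code below: enable CPU FDI TX and PCH
 * FDI RX with training pattern 1, poll FDI_RX_IIR for bit lock, switch
 * both ends to pattern 2, poll for symbol lock, and finally let
 * intel_fdi_normal_train() switch the link to normal output.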
*/ 4474 static void ironlake_fdi_link_train(struct intel_crtc *crtc, 4475 const struct intel_crtc_state *crtc_state) 4476 { 4477 struct drm_device *dev = crtc->base.dev; 4478 struct drm_i915_private *dev_priv = to_i915(dev); 4479 int pipe = crtc->pipe; 4480 i915_reg_t reg; 4481 u32 temp, tries; 4482 4483 /* FDI needs bits from pipe first */ 4484 assert_pipe_enabled(dev_priv, pipe); 4485 4486 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits 4487 for the train result */ 4488 reg = FDI_RX_IMR(pipe); 4489 temp = I915_READ(reg); 4490 temp &= ~FDI_RX_SYMBOL_LOCK; 4491 temp &= ~FDI_RX_BIT_LOCK; 4492 I915_WRITE(reg, temp); 4493 I915_READ(reg); 4494 udelay(150); 4495 4496 /* enable CPU FDI TX and PCH FDI RX */ 4497 reg = FDI_TX_CTL(pipe); 4498 temp = I915_READ(reg); 4499 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4500 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4501 temp &= ~FDI_LINK_TRAIN_NONE; 4502 temp |= FDI_LINK_TRAIN_PATTERN_1; 4503 I915_WRITE(reg, temp | FDI_TX_ENABLE); 4504 4505 reg = FDI_RX_CTL(pipe); 4506 temp = I915_READ(reg); 4507 temp &= ~FDI_LINK_TRAIN_NONE; 4508 temp |= FDI_LINK_TRAIN_PATTERN_1; 4509 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4510 4511 POSTING_READ(reg); 4512 udelay(150); 4513 4514 /* Ironlake workaround, enable clock pointer after FDI enable */ 4515 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 4516 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 4517 FDI_RX_PHASE_SYNC_POINTER_EN); 4518 4519 reg = FDI_RX_IIR(pipe); 4520 for (tries = 0; tries < 5; tries++) { 4521 temp = I915_READ(reg); 4522 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4523 4524 if ((temp & FDI_RX_BIT_LOCK)) { 4525 DRM_DEBUG_KMS("FDI train 1 done.\n"); 4526 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4527 break; 4528 } 4529 } 4530 if (tries == 5) 4531 DRM_ERROR("FDI train 1 fail!\n"); 4532 4533 /* Train 2 */ 4534 reg = FDI_TX_CTL(pipe); 4535 temp = I915_READ(reg); 4536 temp &= ~FDI_LINK_TRAIN_NONE; 4537 temp |= FDI_LINK_TRAIN_PATTERN_2; 4538 I915_WRITE(reg, temp); 4539 4540 reg = FDI_RX_CTL(pipe); 4541 temp = I915_READ(reg); 4542 temp &= ~FDI_LINK_TRAIN_NONE; 4543 temp |= FDI_LINK_TRAIN_PATTERN_2; 4544 I915_WRITE(reg, temp); 4545 4546 POSTING_READ(reg); 4547 udelay(150); 4548 4549 reg = FDI_RX_IIR(pipe); 4550 for (tries = 0; tries < 5; tries++) { 4551 temp = I915_READ(reg); 4552 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4553 4554 if (temp & FDI_RX_SYMBOL_LOCK) { 4555 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4556 DRM_DEBUG_KMS("FDI train 2 done.\n"); 4557 break; 4558 } 4559 } 4560 if (tries == 5) 4561 DRM_ERROR("FDI train 2 fail!\n"); 4562 4563 DRM_DEBUG_KMS("FDI train done\n"); 4564 4565 } 4566 4567 static const int snb_b_fdi_train_param[] = { 4568 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 4569 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 4570 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 4571 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 4572 }; 4573 4574 /* The FDI link training functions for SNB/Cougarpoint.
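 * Same two-pattern sequence as ILK/Ibexpeak, but each training phase
 * additionally steps through the four snb_b_fdi_train_param voltage swing /
 * pre-emphasis settings, polling FDI_RX_IIR up to five times per setting
 * before moving on to the next one.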
*/ 4575 static void gen6_fdi_link_train(struct intel_crtc *crtc, 4576 const struct intel_crtc_state *crtc_state) 4577 { 4578 struct drm_device *dev = crtc->base.dev; 4579 struct drm_i915_private *dev_priv = to_i915(dev); 4580 int pipe = crtc->pipe; 4581 i915_reg_t reg; 4582 u32 temp, i, retry; 4583 4584 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits 4585 for the train result */ 4586 reg = FDI_RX_IMR(pipe); 4587 temp = I915_READ(reg); 4588 temp &= ~FDI_RX_SYMBOL_LOCK; 4589 temp &= ~FDI_RX_BIT_LOCK; 4590 I915_WRITE(reg, temp); 4591 4592 POSTING_READ(reg); 4593 udelay(150); 4594 4595 /* enable CPU FDI TX and PCH FDI RX */ 4596 reg = FDI_TX_CTL(pipe); 4597 temp = I915_READ(reg); 4598 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4599 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4600 temp &= ~FDI_LINK_TRAIN_NONE; 4601 temp |= FDI_LINK_TRAIN_PATTERN_1; 4602 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4603 /* SNB-B */ 4604 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 4605 I915_WRITE(reg, temp | FDI_TX_ENABLE); 4606 4607 I915_WRITE(FDI_RX_MISC(pipe), 4608 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 4609 4610 reg = FDI_RX_CTL(pipe); 4611 temp = I915_READ(reg); 4612 if (HAS_PCH_CPT(dev_priv)) { 4613 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4614 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4615 } else { 4616 temp &= ~FDI_LINK_TRAIN_NONE; 4617 temp |= FDI_LINK_TRAIN_PATTERN_1; 4618 } 4619 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4620 4621 POSTING_READ(reg); 4622 udelay(150); 4623 4624 for (i = 0; i < 4; i++) { 4625 reg = FDI_TX_CTL(pipe); 4626 temp = I915_READ(reg); 4627 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4628 temp |= snb_b_fdi_train_param[i]; 4629 I915_WRITE(reg, temp); 4630 4631 POSTING_READ(reg); 4632 udelay(500); 4633 4634 for (retry = 0; retry < 5; retry++) { 4635 reg = FDI_RX_IIR(pipe); 4636 temp = I915_READ(reg); 4637 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4638 if (temp & FDI_RX_BIT_LOCK) { 4639 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4640 DRM_DEBUG_KMS("FDI train 1 done.\n"); 4641 break; 4642 } 4643 udelay(50); 4644 } 4645 if (retry < 5) 4646 break; 4647 } 4648 if (i == 4) 4649 DRM_ERROR("FDI train 1 fail!\n"); 4650 4651 /* Train 2 */ 4652 reg = FDI_TX_CTL(pipe); 4653 temp = I915_READ(reg); 4654 temp &= ~FDI_LINK_TRAIN_NONE; 4655 temp |= FDI_LINK_TRAIN_PATTERN_2; 4656 if (IS_GEN(dev_priv, 6)) { 4657 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4658 /* SNB-B */ 4659 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 4660 } 4661 I915_WRITE(reg, temp); 4662 4663 reg = FDI_RX_CTL(pipe); 4664 temp = I915_READ(reg); 4665 if (HAS_PCH_CPT(dev_priv)) { 4666 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4667 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 4668 } else { 4669 temp &= ~FDI_LINK_TRAIN_NONE; 4670 temp |= FDI_LINK_TRAIN_PATTERN_2; 4671 } 4672 I915_WRITE(reg, temp); 4673 4674 POSTING_READ(reg); 4675 udelay(150); 4676 4677 for (i = 0; i < 4; i++) { 4678 reg = FDI_TX_CTL(pipe); 4679 temp = I915_READ(reg); 4680 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4681 temp |= snb_b_fdi_train_param[i]; 4682 I915_WRITE(reg, temp); 4683 4684 POSTING_READ(reg); 4685 udelay(500); 4686 4687 for (retry = 0; retry < 5; retry++) { 4688 reg = FDI_RX_IIR(pipe); 4689 temp = I915_READ(reg); 4690 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4691 if (temp & FDI_RX_SYMBOL_LOCK) { 4692 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4693 DRM_DEBUG_KMS("FDI train 2 done.\n"); 4694 break; 4695 } 4696 udelay(50); 4697 } 4698 if (retry < 5) 4699 break; 4700 } 4701 if (i == 4) 4702 DRM_ERROR("FDI train 2 fail!\n"); 4703 4704 DRM_DEBUG_KMS("FDI train done.\n"); 4705 } 4706 4707
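/*
 * The snb_b_fdi_train_param[] entries above encode the transmit voltage
 * swing and pre-emphasis level in their names, e.g.
 * FDI_LINK_TRAIN_400MV_6DB_SNB_B selects a 400 mV swing with 6 dB of
 * pre-emphasis; ivb_manual_fdi_link_train() below tries each entry twice.
 */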
/* Manual link training for Ivy Bridge A0 parts */ 4708 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, 4709 const struct intel_crtc_state *crtc_state) 4710 { 4711 struct drm_device *dev = crtc->base.dev; 4712 struct drm_i915_private *dev_priv = to_i915(dev); 4713 int pipe = crtc->pipe; 4714 i915_reg_t reg; 4715 u32 temp, i, j; 4716 4717 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits 4718 for the train result */ 4719 reg = FDI_RX_IMR(pipe); 4720 temp = I915_READ(reg); 4721 temp &= ~FDI_RX_SYMBOL_LOCK; 4722 temp &= ~FDI_RX_BIT_LOCK; 4723 I915_WRITE(reg, temp); 4724 4725 POSTING_READ(reg); 4726 udelay(150); 4727 4728 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 4729 I915_READ(FDI_RX_IIR(pipe))); 4730 4731 /* Try each vswing and preemphasis setting twice before moving on */ 4732 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { 4733 /* disable first in case we need to retry */ 4734 reg = FDI_TX_CTL(pipe); 4735 temp = I915_READ(reg); 4736 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 4737 temp &= ~FDI_TX_ENABLE; 4738 I915_WRITE(reg, temp); 4739 4740 reg = FDI_RX_CTL(pipe); 4741 temp = I915_READ(reg); 4742 temp &= ~FDI_LINK_TRAIN_AUTO; 4743 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4744 temp &= ~FDI_RX_ENABLE; 4745 I915_WRITE(reg, temp); 4746 4747 /* enable CPU FDI TX and PCH FDI RX */ 4748 reg = FDI_TX_CTL(pipe); 4749 temp = I915_READ(reg); 4750 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4751 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4752 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 4753 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4754 temp |= snb_b_fdi_train_param[j/2]; 4755 temp |= FDI_COMPOSITE_SYNC; 4756 I915_WRITE(reg, temp | FDI_TX_ENABLE); 4757 4758 I915_WRITE(FDI_RX_MISC(pipe), 4759 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 4760 4761 reg = FDI_RX_CTL(pipe); 4762 temp = I915_READ(reg); 4763 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4764 temp |= FDI_COMPOSITE_SYNC; 4765 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4766 4767 POSTING_READ(reg); 4768 udelay(1); /* should be 0.5us */ 4769 4770 for (i = 0; i < 4; i++) { 4771 reg = FDI_RX_IIR(pipe); 4772 temp = I915_READ(reg); 4773 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4774 4775 if (temp & FDI_RX_BIT_LOCK || 4776 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 4777 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4778 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", 4779 i); 4780 break; 4781 } 4782 udelay(1); /* should be 0.5us */ 4783 } 4784 if (i == 4) { 4785 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); 4786 continue; 4787 } 4788 4789 /* Train 2 */ 4790 reg = FDI_TX_CTL(pipe); 4791 temp = I915_READ(reg); 4792 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4793 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 4794 I915_WRITE(reg, temp); 4795 4796 reg = FDI_RX_CTL(pipe); 4797 temp = I915_READ(reg); 4798 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4799 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 4800 I915_WRITE(reg, temp); 4801 4802 POSTING_READ(reg); 4803 udelay(2); /* should be 1.5us */ 4804 4805 for (i = 0; i < 4; i++) { 4806 reg = FDI_RX_IIR(pipe); 4807 temp = I915_READ(reg); 4808 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4809 4810 if (temp & FDI_RX_SYMBOL_LOCK || 4811 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { 4812 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4813 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", 4814 i); 4815 goto train_done; 4816 } 4817 udelay(2); /* should be 1.5us */ 4818 } 4819 if (i == 4) 4820 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); 4821 } 4822 4823 train_done: 4824 DRM_DEBUG_KMS("FDI train done.\n");
4825 } 4826 4827 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) 4828 { 4829 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 4830 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 4831 int pipe = intel_crtc->pipe; 4832 i915_reg_t reg; 4833 u32 temp; 4834 4835 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 4836 reg = FDI_RX_CTL(pipe); 4837 temp = I915_READ(reg); 4838 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 4839 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4840 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4841 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 4842 4843 POSTING_READ(reg); 4844 udelay(200); 4845 4846 /* Switch from Rawclk to PCDclk */ 4847 temp = I915_READ(reg); 4848 I915_WRITE(reg, temp | FDI_PCDCLK); 4849 4850 POSTING_READ(reg); 4851 udelay(200); 4852 4853 /* Enable CPU FDI TX PLL, always on for Ironlake */ 4854 reg = FDI_TX_CTL(pipe); 4855 temp = I915_READ(reg); 4856 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 4857 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 4858 4859 POSTING_READ(reg); 4860 udelay(100); 4861 } 4862 } 4863 4864 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 4865 { 4866 struct drm_device *dev = intel_crtc->base.dev; 4867 struct drm_i915_private *dev_priv = to_i915(dev); 4868 int pipe = intel_crtc->pipe; 4869 i915_reg_t reg; 4870 u32 temp; 4871 4872 /* Switch from PCDclk to Rawclk */ 4873 reg = FDI_RX_CTL(pipe); 4874 temp = I915_READ(reg); 4875 I915_WRITE(reg, temp & ~FDI_PCDCLK); 4876 4877 /* Disable CPU FDI TX PLL */ 4878 reg = FDI_TX_CTL(pipe); 4879 temp = I915_READ(reg); 4880 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 4881 4882 POSTING_READ(reg); 4883 udelay(100); 4884 4885 reg = FDI_RX_CTL(pipe); 4886 temp = I915_READ(reg); 4887 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 4888 4889 /* Wait for the clocks to turn off. 
*/ 4890 POSTING_READ(reg); 4891 udelay(100); 4892 } 4893 4894 static void ironlake_fdi_disable(struct drm_crtc *crtc) 4895 { 4896 struct drm_device *dev = crtc->dev; 4897 struct drm_i915_private *dev_priv = to_i915(dev); 4898 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4899 int pipe = intel_crtc->pipe; 4900 i915_reg_t reg; 4901 u32 temp; 4902 4903 /* disable CPU FDI tx and PCH FDI rx */ 4904 reg = FDI_TX_CTL(pipe); 4905 temp = I915_READ(reg); 4906 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 4907 POSTING_READ(reg); 4908 4909 reg = FDI_RX_CTL(pipe); 4910 temp = I915_READ(reg); 4911 temp &= ~(0x7 << 16); 4912 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4913 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 4914 4915 POSTING_READ(reg); 4916 udelay(100); 4917 4918 /* Ironlake workaround, disable clock pointer after downing FDI */ 4919 if (HAS_PCH_IBX(dev_priv)) 4920 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 4921 4922 /* still set train pattern 1 */ 4923 reg = FDI_TX_CTL(pipe); 4924 temp = I915_READ(reg); 4925 temp &= ~FDI_LINK_TRAIN_NONE; 4926 temp |= FDI_LINK_TRAIN_PATTERN_1; 4927 I915_WRITE(reg, temp); 4928 4929 reg = FDI_RX_CTL(pipe); 4930 temp = I915_READ(reg); 4931 if (HAS_PCH_CPT(dev_priv)) { 4932 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4933 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4934 } else { 4935 temp &= ~FDI_LINK_TRAIN_NONE; 4936 temp |= FDI_LINK_TRAIN_PATTERN_1; 4937 } 4938 /* BPC in FDI rx is consistent with that in PIPECONF */ 4939 temp &= ~(0x07 << 16); 4940 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4941 I915_WRITE(reg, temp); 4942 4943 POSTING_READ(reg); 4944 udelay(100); 4945 } 4946 4947 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 4948 { 4949 struct drm_crtc *crtc; 4950 bool cleanup_done; 4951 4952 drm_for_each_crtc(crtc, &dev_priv->drm) { 4953 struct drm_crtc_commit *commit; 4954 spin_lock(&crtc->commit_lock); 4955 commit = list_first_entry_or_null(&crtc->commit_list, 4956 struct drm_crtc_commit, commit_entry); 4957 cleanup_done = commit ? 4958 try_wait_for_completion(&commit->cleanup_done) : true; 4959 spin_unlock(&crtc->commit_lock); 4960 4961 if (cleanup_done) 4962 continue; 4963 4964 drm_crtc_wait_one_vblank(crtc); 4965 4966 return true; 4967 } 4968 4969 return false; 4970 } 4971 4972 void lpt_disable_iclkip(struct drm_i915_private *dev_priv) 4973 { 4974 u32 temp; 4975 4976 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); 4977 4978 mutex_lock(&dev_priv->sb_lock); 4979 4980 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 4981 temp |= SBI_SSCCTL_DISABLE; 4982 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 4983 4984 mutex_unlock(&dev_priv->sb_lock); 4985 } 4986 4987 /* Program iCLKIP clock to the desired frequency */ 4988 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) 4989 { 4990 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 4991 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4992 int clock = crtc_state->base.adjusted_mode.crtc_clock; 4993 u32 divsel, phaseinc, auxdiv, phasedir = 0; 4994 u32 temp; 4995 4996 lpt_disable_iclkip(dev_priv); 4997 4998 /* The iCLK virtual clock root frequency is in MHz, 4999 * but the adjusted_mode->crtc_clock is in KHz. To get the 5000 * divisors, it is necessary to divide one by the other, so we 5001 * convert the virtual clock precision to KHz here for higher 5002 * precision.
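 *
 * Worked example (illustrative numbers only): for a 148500 KHz pixel
 * clock with auxdiv = 0, desired_divisor = DIV_ROUND_CLOSEST(172800000,
 * 148500) = 1164, so divsel = 1164 / 64 - 2 = 16 and phaseinc =
 * 1164 % 64 = 12, comfortably within the 7-bit divisor range checked
 * below.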
5003 */ 5004 for (auxdiv = 0; auxdiv < 2; auxdiv++) { 5005 u32 iclk_virtual_root_freq = 172800 * 1000; 5006 u32 iclk_pi_range = 64; 5007 u32 desired_divisor; 5008 5009 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5010 clock << auxdiv); 5011 divsel = (desired_divisor / iclk_pi_range) - 2; 5012 phaseinc = desired_divisor % iclk_pi_range; 5013 5014 /* 5015 * Near 20MHz is a corner case which is 5016 * out of range for the 7-bit divisor 5017 */ 5018 if (divsel <= 0x7f) 5019 break; 5020 } 5021 5022 /* This should not happen with any sane values */ 5023 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 5024 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 5025 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & 5026 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 5027 5028 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 5029 clock, 5030 auxdiv, 5031 divsel, 5032 phasedir, 5033 phaseinc); 5034 5035 mutex_lock(&dev_priv->sb_lock); 5036 5037 /* Program SSCDIVINTPHASE6 */ 5038 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5039 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 5040 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 5041 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 5042 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 5043 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 5044 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 5045 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 5046 5047 /* Program SSCAUXDIV */ 5048 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5049 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 5050 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 5051 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 5052 5053 /* Enable modulator and associated divider */ 5054 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5055 temp &= ~SBI_SSCCTL_DISABLE; 5056 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 5057 5058 mutex_unlock(&dev_priv->sb_lock); 5059 5060 /* Wait for initialization time */ 5061 udelay(24); 5062 5063 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); 5064 } 5065 5066 int lpt_get_iclkip(struct drm_i915_private *dev_priv) 5067 { 5068 u32 divsel, phaseinc, auxdiv; 5069 u32 iclk_virtual_root_freq = 172800 * 1000; 5070 u32 iclk_pi_range = 64; 5071 u32 desired_divisor; 5072 u32 temp; 5073 5074 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) 5075 return 0; 5076 5077 mutex_lock(&dev_priv->sb_lock); 5078 5079 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5080 if (temp & SBI_SSCCTL_DISABLE) { 5081 mutex_unlock(&dev_priv->sb_lock); 5082 return 0; 5083 } 5084 5085 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5086 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> 5087 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; 5088 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> 5089 SBI_SSCDIVINTPHASE_INCVAL_SHIFT; 5090 5091 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5092 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> 5093 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; 5094 5095 mutex_unlock(&dev_priv->sb_lock); 5096 5097 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; 5098 5099 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5100 desired_divisor << auxdiv); 5101 } 5102 5103 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, 5104 enum pipe pch_transcoder) 5105 { 5106 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5107 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5108 enum transcoder cpu_transcoder = 
crtc_state->cpu_transcoder; 5109 5110 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 5111 I915_READ(HTOTAL(cpu_transcoder))); 5112 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder), 5113 I915_READ(HBLANK(cpu_transcoder))); 5114 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder), 5115 I915_READ(HSYNC(cpu_transcoder))); 5116 5117 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder), 5118 I915_READ(VTOTAL(cpu_transcoder))); 5119 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder), 5120 I915_READ(VBLANK(cpu_transcoder))); 5121 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder), 5122 I915_READ(VSYNC(cpu_transcoder))); 5123 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder), 5124 I915_READ(VSYNCSHIFT(cpu_transcoder))); 5125 } 5126 5127 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) 5128 { 5129 u32 temp; 5130 5131 temp = I915_READ(SOUTH_CHICKEN1); 5132 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 5133 return; 5134 5135 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 5136 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 5137 5138 temp &= ~FDI_BC_BIFURCATION_SELECT; 5139 if (enable) 5140 temp |= FDI_BC_BIFURCATION_SELECT; 5141 5142 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); 5143 I915_WRITE(SOUTH_CHICKEN1, temp); 5144 POSTING_READ(SOUTH_CHICKEN1); 5145 } 5146 5147 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) 5148 { 5149 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5150 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5151 5152 switch (crtc->pipe) { 5153 case PIPE_A: 5154 break; 5155 case PIPE_B: 5156 if (crtc_state->fdi_lanes > 2) 5157 cpt_set_fdi_bc_bifurcation(dev_priv, false); 5158 else 5159 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5160 5161 break; 5162 case PIPE_C: 5163 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5164 5165 break; 5166 default: 5167 BUG(); 5168 } 5169 } 5170 5171 /* 5172 * Finds the encoder associated with the given CRTC. This can only be 5173 * used when we know that the CRTC isn't feeding multiple encoders! 
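 * With zero or multiple matching encoders the WARN below fires and the
 * last match (or NULL) is returned, so callers must ensure a 1:1 mapping.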
5174 */ 5175 static struct intel_encoder * 5176 intel_get_crtc_new_encoder(const struct intel_atomic_state *state, 5177 const struct intel_crtc_state *crtc_state) 5178 { 5179 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5180 const struct drm_connector_state *connector_state; 5181 const struct drm_connector *connector; 5182 struct intel_encoder *encoder = NULL; 5183 int num_encoders = 0; 5184 int i; 5185 5186 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 5187 if (connector_state->crtc != &crtc->base) 5188 continue; 5189 5190 encoder = to_intel_encoder(connector_state->best_encoder); 5191 num_encoders++; 5192 } 5193 5194 WARN(num_encoders != 1, "%d encoders for pipe %c\n", 5195 num_encoders, pipe_name(crtc->pipe)); 5196 5197 return encoder; 5198 } 5199 5200 /* 5201 * Enable PCH resources required for PCH ports: 5202 * - PCH PLLs 5203 * - FDI training & RX/TX 5204 * - update transcoder timings 5205 * - DP transcoding bits 5206 * - transcoder 5207 */ 5208 static void ironlake_pch_enable(const struct intel_atomic_state *state, 5209 const struct intel_crtc_state *crtc_state) 5210 { 5211 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5212 struct drm_device *dev = crtc->base.dev; 5213 struct drm_i915_private *dev_priv = to_i915(dev); 5214 int pipe = crtc->pipe; 5215 u32 temp; 5216 5217 assert_pch_transcoder_disabled(dev_priv, pipe); 5218 5219 if (IS_IVYBRIDGE(dev_priv)) 5220 ivybridge_update_fdi_bc_bifurcation(crtc_state); 5221 5222 /* Write the TU size bits before fdi link training, so that error 5223 * detection works. */ 5224 I915_WRITE(FDI_RX_TUSIZE1(pipe), 5225 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 5226 5227 /* For PCH output, training FDI link */ 5228 dev_priv->display.fdi_link_train(crtc, crtc_state); 5229 5230 /* We need to program the right clock selection before writing the pixel 5231 * multiplier into the DPLL. */ 5232 if (HAS_PCH_CPT(dev_priv)) { 5233 u32 sel; 5234 5235 temp = I915_READ(PCH_DPLL_SEL); 5236 temp |= TRANS_DPLL_ENABLE(pipe); 5237 sel = TRANS_DPLLB_SEL(pipe); 5238 if (crtc_state->shared_dpll == 5239 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 5240 temp |= sel; 5241 else 5242 temp &= ~sel; 5243 I915_WRITE(PCH_DPLL_SEL, temp); 5244 } 5245 5246 /* XXX: PCH PLLs can be enabled any time before we enable the PCH 5247 * transcoder, and we actually should do this to not upset any PCH 5248 * transcoder that already uses the clock when we share it. 5249 * 5250 * Note that enable_shared_dpll tries to do the right thing, but 5251 * get_shared_dpll unconditionally resets the pll - we need that to have 5252 * the right LVDS enable sequence.
*/ 5253 intel_enable_shared_dpll(crtc_state); 5254 5255 /* set transcoder timing, panel must allow it */ 5256 assert_panel_unlocked(dev_priv, pipe); 5257 ironlake_pch_transcoder_set_timings(crtc_state, pipe); 5258 5259 intel_fdi_normal_train(crtc); 5260 5261 /* For PCH DP, enable TRANS_DP_CTL */ 5262 if (HAS_PCH_CPT(dev_priv) && 5263 intel_crtc_has_dp_encoder(crtc_state)) { 5264 const struct drm_display_mode *adjusted_mode = 5265 &crtc_state->base.adjusted_mode; 5266 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 5267 i915_reg_t reg = TRANS_DP_CTL(pipe); 5268 enum port port; 5269 5270 temp = I915_READ(reg); 5271 temp &= ~(TRANS_DP_PORT_SEL_MASK | 5272 TRANS_DP_SYNC_MASK | 5273 TRANS_DP_BPC_MASK); 5274 temp |= TRANS_DP_OUTPUT_ENABLE; 5275 temp |= bpc << 9; /* same format but at 11:9 */ 5276 5277 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 5278 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 5279 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 5280 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 5281 5282 port = intel_get_crtc_new_encoder(state, crtc_state)->port; 5283 WARN_ON(port < PORT_B || port > PORT_D); 5284 temp |= TRANS_DP_PORT_SEL(port); 5285 5286 I915_WRITE(reg, temp); 5287 } 5288 5289 ironlake_enable_pch_transcoder(crtc_state); 5290 } 5291 5292 static void lpt_pch_enable(const struct intel_atomic_state *state, 5293 const struct intel_crtc_state *crtc_state) 5294 { 5295 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5296 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5297 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 5298 5299 assert_pch_transcoder_disabled(dev_priv, PIPE_A); 5300 5301 lpt_program_iclkip(crtc_state); 5302 5303 /* Set transcoder timing. */ 5304 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A); 5305 5306 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 5307 } 5308 5309 static void cpt_verify_modeset(struct drm_device *dev, int pipe) 5310 { 5311 struct drm_i915_private *dev_priv = to_i915(dev); 5312 i915_reg_t dslreg = PIPEDSL(pipe); 5313 u32 temp; 5314 5315 temp = I915_READ(dslreg); 5316 udelay(500); 5317 if (wait_for(I915_READ(dslreg) != temp, 5)) { 5318 if (wait_for(I915_READ(dslreg) != temp, 5)) 5319 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 5320 } 5321 } 5322 5323 /* 5324 * The hardware phase 0.0 refers to the center of the pixel. 5325 * We want to start from the top/left edge which is phase 5326 * -0.5. That matches how the hardware calculates the scaling 5327 * factors (from top-left of the first pixel to bottom-right 5328 * of the last pixel, as opposed to the pixel centers). 5329 * 5330 * For 4:2:0 subsampled chroma planes we obviously have to 5331 * adjust that so that the chroma sample position lands in 5332 * the right spot. 5333 * 5334 * Note that for packed YCbCr 4:2:2 formats there is no way to 5335 * control chroma siting. The hardware simply replicates the 5336 * chroma samples for both of the luma samples, and thus we don't 5337 * actually get the expected MPEG2 chroma siting convention :( 5338 * The same behaviour is observed on pre-SKL platforms as well. 
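 *
 * As a quick sanity check of the formula derived below (illustrative
 * numbers): for luma/RGB, i.e. sub = 1 and no chroma cositing, at a 1:1
 * scale (scale = 0x10000 in .16 fixed point), skl_scaler_calc_phase()
 * returns an initial phase of -0x8000 + 0x10000 / 2 = 0, i.e. exactly
 * 0.0, with only PS_PHASE_TRIP set in the register value.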
5339 * 5340 * Theory behind the formula (note that we ignore sub-pixel 5341 * source coordinates): 5342 * s = source sample position 5343 * d = destination sample position 5344 * 5345 * Downscaling 4:1: 5346 * -0.5 5347 * | 0.0 5348 * | | 1.5 (initial phase) 5349 * | | | 5350 * v v v 5351 * | s | s | s | s | 5352 * | d | 5353 * 5354 * Upscaling 1:4: 5355 * -0.5 5356 * | -0.375 (initial phase) 5357 * | | 0.0 5358 * | | | 5359 * v v v 5360 * | s | 5361 * | d | d | d | d | 5362 */ 5363 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited) 5364 { 5365 int phase = -0x8000; 5366 u16 trip = 0; 5367 5368 if (chroma_cosited) 5369 phase += (sub - 1) * 0x8000 / sub; 5370 5371 phase += scale / (2 * sub); 5372 5373 /* 5374 * Hardware initial phase limited to [-0.5:1.5]. 5375 * Since the max hardware scale factor is 3.0, we 5376 * should never actually exceed 1.0 here. 5377 */ 5378 WARN_ON(phase < -0x8000 || phase > 0x18000); 5379 5380 if (phase < 0) 5381 phase = 0x10000 + phase; 5382 else 5383 trip = PS_PHASE_TRIP; 5384 5385 return ((phase >> 2) & PS_PHASE_MASK) | trip; 5386 } 5387 5388 #define SKL_MIN_SRC_W 8 5389 #define SKL_MAX_SRC_W 4096 5390 #define SKL_MIN_SRC_H 8 5391 #define SKL_MAX_SRC_H 4096 5392 #define SKL_MIN_DST_W 8 5393 #define SKL_MAX_DST_W 4096 5394 #define SKL_MIN_DST_H 8 5395 #define SKL_MAX_DST_H 4096 5396 #define ICL_MAX_SRC_W 5120 5397 #define ICL_MAX_SRC_H 4096 5398 #define ICL_MAX_DST_W 5120 5399 #define ICL_MAX_DST_H 4096 5400 #define SKL_MIN_YUV_420_SRC_W 16 5401 #define SKL_MIN_YUV_420_SRC_H 16 5402 5403 static int 5404 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 5405 unsigned int scaler_user, int *scaler_id, 5406 int src_w, int src_h, int dst_w, int dst_h, 5407 const struct drm_format_info *format, bool need_scaler) 5408 { 5409 struct intel_crtc_scaler_state *scaler_state = 5410 &crtc_state->scaler_state; 5411 struct intel_crtc *intel_crtc = 5412 to_intel_crtc(crtc_state->base.crtc); 5413 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 5414 const struct drm_display_mode *adjusted_mode = 5415 &crtc_state->base.adjusted_mode; 5416 5417 /* 5418 * Src coordinates are already rotated by 270 degrees for 5419 * the 90/270 degree plane rotation cases (to match the 5420 * GTT mapping), hence no need to account for rotation here. 5421 */ 5422 if (src_w != dst_w || src_h != dst_h) 5423 need_scaler = true; 5424 5425 /* 5426 * Scaling/fitting not supported in IF-ID mode in GEN9+ 5427 * TODO: Interlace fetch mode doesn't support YUV420 planar formats. 5428 * Once NV12 is enabled, handle it here while allocating scaler 5429 * for NV12. 5430 */ 5431 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && 5432 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 5433 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); 5434 return -EINVAL; 5435 } 5436 5437 /* 5438 * if the plane is being disabled, the scaler is no longer required, or detach is forced 5439 * - free the scaler bound to this plane/crtc 5440 * - in order to do this, update crtc->scaler_usage 5441 * 5442 * Here scaler state in crtc_state is set free so that 5443 * the scaler can be assigned to another user. Actual register 5444 * update to free the scaler is done in plane/panel-fit programming. 5445 * For this purpose crtc/plane_state->scaler_id isn't reset here.
5446 */ 5447 if (force_detach || !need_scaler) { 5448 if (*scaler_id >= 0) { 5449 scaler_state->scaler_users &= ~(1 << scaler_user); 5450 scaler_state->scalers[*scaler_id].in_use = 0; 5451 5452 DRM_DEBUG_KMS("scaler_user index %u.%u: " 5453 "Staged freeing scaler id %d scaler_users = 0x%x\n", 5454 intel_crtc->pipe, scaler_user, *scaler_id, 5455 scaler_state->scaler_users); 5456 *scaler_id = -1; 5457 } 5458 return 0; 5459 } 5460 5461 if (format && is_planar_yuv_format(format->format) && 5462 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { 5463 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); 5464 return -EINVAL; 5465 } 5466 5467 /* range checks */ 5468 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 5469 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 5470 (INTEL_GEN(dev_priv) >= 11 && 5471 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || 5472 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || 5473 (INTEL_GEN(dev_priv) < 11 && 5474 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 5475 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { 5476 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " 5477 "size is out of scaler range\n", 5478 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); 5479 return -EINVAL; 5480 } 5481 5482 /* mark this plane as a scaler user in crtc_state */ 5483 scaler_state->scaler_users |= (1 << scaler_user); 5484 DRM_DEBUG_KMS("scaler_user index %u.%u: " 5485 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", 5486 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, 5487 scaler_state->scaler_users); 5488 5489 return 0; 5490 } 5491 5492 /** 5493 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc. 5494 * 5495 * @state: crtc's scaler state 5496 * 5497 * Return 5498 * 0 - scaler_usage updated successfully 5499 * error - requested scaling cannot be supported or other error condition 5500 */ 5501 int skl_update_scaler_crtc(struct intel_crtc_state *state) 5502 { 5503 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 5504 bool need_scaler = false; 5505 5506 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 5507 need_scaler = true; 5508 5509 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 5510 &state->scaler_state.scaler_id, 5511 state->pipe_src_w, state->pipe_src_h, 5512 adjusted_mode->crtc_hdisplay, 5513 adjusted_mode->crtc_vdisplay, NULL, need_scaler); 5514 } 5515 5516 /** 5517 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 5518 * @crtc_state: crtc's scaler state 5519 * @plane_state: atomic plane state to update 5520 * 5521 * Return 5522 * 0 - scaler_usage updated successfully 5523 * error - requested scaling cannot be supported or other error condition 5524 */ 5525 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 5526 struct intel_plane_state *plane_state) 5527 { 5528 struct intel_plane *intel_plane = 5529 to_intel_plane(plane_state->base.plane); 5530 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 5531 struct drm_framebuffer *fb = plane_state->base.fb; 5532 int ret; 5533 bool force_detach = !fb || !plane_state->base.visible; 5534 bool need_scaler = false; 5535 5536 /* Pre-gen11 and SDR planes always need a scaler for planar formats. 
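 * (The scaler performs the chroma up-sampling here; on gen11 only the HDR
 * planes have a dedicated chroma up-sampler and can do without one.)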
*/ 5537 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && 5538 fb && is_planar_yuv_format(fb->format->format)) 5539 need_scaler = true; 5540 5541 ret = skl_update_scaler(crtc_state, force_detach, 5542 drm_plane_index(&intel_plane->base), 5543 &plane_state->scaler_id, 5544 drm_rect_width(&plane_state->base.src) >> 16, 5545 drm_rect_height(&plane_state->base.src) >> 16, 5546 drm_rect_width(&plane_state->base.dst), 5547 drm_rect_height(&plane_state->base.dst), 5548 fb ? fb->format : NULL, need_scaler); 5549 5550 if (ret || plane_state->scaler_id < 0) 5551 return ret; 5552 5553 /* check colorkey */ 5554 if (plane_state->ckey.flags) { 5555 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", 5556 intel_plane->base.base.id, 5557 intel_plane->base.name); 5558 return -EINVAL; 5559 } 5560 5561 /* Check src format */ 5562 switch (fb->format->format) { 5563 case DRM_FORMAT_RGB565: 5564 case DRM_FORMAT_XBGR8888: 5565 case DRM_FORMAT_XRGB8888: 5566 case DRM_FORMAT_ABGR8888: 5567 case DRM_FORMAT_ARGB8888: 5568 case DRM_FORMAT_XRGB2101010: 5569 case DRM_FORMAT_XBGR2101010: 5570 case DRM_FORMAT_XBGR16161616F: 5571 case DRM_FORMAT_ABGR16161616F: 5572 case DRM_FORMAT_XRGB16161616F: 5573 case DRM_FORMAT_ARGB16161616F: 5574 case DRM_FORMAT_YUYV: 5575 case DRM_FORMAT_YVYU: 5576 case DRM_FORMAT_UYVY: 5577 case DRM_FORMAT_VYUY: 5578 case DRM_FORMAT_NV12: 5579 case DRM_FORMAT_P010: 5580 case DRM_FORMAT_P012: 5581 case DRM_FORMAT_P016: 5582 case DRM_FORMAT_Y210: 5583 case DRM_FORMAT_Y212: 5584 case DRM_FORMAT_Y216: 5585 case DRM_FORMAT_XVYU2101010: 5586 case DRM_FORMAT_XVYU12_16161616: 5587 case DRM_FORMAT_XVYU16161616: 5588 break; 5589 default: 5590 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", 5591 intel_plane->base.base.id, intel_plane->base.name, 5592 fb->base.id, fb->format->format); 5593 return -EINVAL; 5594 } 5595 5596 return 0; 5597 } 5598 5599 static void skylake_scaler_disable(struct intel_crtc *crtc) 5600 { 5601 int i; 5602 5603 for (i = 0; i < crtc->num_scalers; i++) 5604 skl_detach_scaler(crtc, i); 5605 } 5606 5607 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) 5608 { 5609 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5610 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5611 enum pipe pipe = crtc->pipe; 5612 const struct intel_crtc_scaler_state *scaler_state = 5613 &crtc_state->scaler_state; 5614 5615 if (crtc_state->pch_pfit.enabled) { 5616 u16 uv_rgb_hphase, uv_rgb_vphase; 5617 int pfit_w, pfit_h, hscale, vscale; 5618 int id; 5619 5620 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0)) 5621 return; 5622 5623 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF; 5624 pfit_h = crtc_state->pch_pfit.size & 0xFFFF; 5625 5626 hscale = (crtc_state->pipe_src_w << 16) / pfit_w; 5627 vscale = (crtc_state->pipe_src_h << 16) / pfit_h; 5628 5629 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); 5630 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); 5631 5632 id = scaler_state->scaler_id; 5633 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 5634 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 5635 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id), 5636 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); 5637 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), 5638 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); 5639 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos); 5640 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size); 5641 } 5642 } 5643 5644 static void ironlake_pfit_enable(const struct 
intel_crtc_state *crtc_state) 5645 { 5646 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5647 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5648 int pipe = crtc->pipe; 5649 5650 if (crtc_state->pch_pfit.enabled) { 5651 /* Force use of hard-coded filter coefficients 5652 * as some pre-programmed values are broken, 5653 * e.g. x201. 5654 */ 5655 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 5656 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 5657 PF_PIPE_SEL_IVB(pipe)); 5658 else 5659 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 5660 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos); 5661 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size); 5662 } 5663 } 5664 5665 void hsw_enable_ips(const struct intel_crtc_state *crtc_state) 5666 { 5667 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5668 struct drm_device *dev = crtc->base.dev; 5669 struct drm_i915_private *dev_priv = to_i915(dev); 5670 5671 if (!crtc_state->ips_enabled) 5672 return; 5673 5674 /* 5675 * We can only enable IPS after we enable a plane and wait for a vblank. 5676 * This function is called from post_plane_update, which is run after 5677 * a vblank wait. 5678 */ 5679 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); 5680 5681 if (IS_BROADWELL(dev_priv)) { 5682 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 5683 IPS_ENABLE | IPS_PCODE_CONTROL)); 5684 /* Quoting Art Runyan: "it's not safe to expect any particular 5685 * value in IPS_CTL bit 31 after enabling IPS through the 5686 * mailbox." Moreover, the mailbox may return a bogus state, 5687 * so we need to just enable it and continue on. 5688 */ 5689 } else { 5690 I915_WRITE(IPS_CTL, IPS_ENABLE); 5691 /* The bit only becomes 1 in the next vblank, so this wait here 5692 * is essentially intel_wait_for_vblank. If we don't have this 5693 * and don't wait for vblanks until the end of crtc_enable, then 5694 * the HW state readout code will complain that the expected 5695 * IPS_CTL value is not the one we read. */ 5696 if (intel_wait_for_register(&dev_priv->uncore, 5697 IPS_CTL, IPS_ENABLE, IPS_ENABLE, 5698 50)) 5699 DRM_ERROR("Timed out waiting for IPS enable\n"); 5700 } 5701 } 5702 5703 void hsw_disable_ips(const struct intel_crtc_state *crtc_state) 5704 { 5705 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5706 struct drm_device *dev = crtc->base.dev; 5707 struct drm_i915_private *dev_priv = to_i915(dev); 5708 5709 if (!crtc_state->ips_enabled) 5710 return; 5711 5712 if (IS_BROADWELL(dev_priv)) { 5713 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 5714 /* 5715 * Wait for PCODE to finish disabling IPS. The BSpec-specified 5716 * 42ms timeout value leads to occasional timeouts, so use 100ms 5717 * instead. 5718 */ 5719 if (intel_wait_for_register(&dev_priv->uncore, 5720 IPS_CTL, IPS_ENABLE, 0, 5721 100)) 5722 DRM_ERROR("Timed out waiting for IPS disable\n"); 5723 } else { 5724 I915_WRITE(IPS_CTL, 0); 5725 POSTING_READ(IPS_CTL); 5726 } 5727 5728 /* We need to wait for a vblank before we can disable the plane. */ 5729 intel_wait_for_vblank(dev_priv, crtc->pipe); 5730 } 5731 5732 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 5733 { 5734 if (intel_crtc->overlay) { 5735 struct drm_device *dev = intel_crtc->base.dev; 5736 5737 mutex_lock(&dev->struct_mutex); 5738 (void) intel_overlay_switch_off(intel_crtc->overlay); 5739 mutex_unlock(&dev->struct_mutex); 5740 } 5741 5742 /* Let userspace switch the overlay on again.
In most cases userspace 5743 * has to recompute where to put it anyway. 5744 */ 5745 } 5746 5747 /** 5748 * intel_post_enable_primary - Perform operations after enabling primary plane 5749 * @crtc: the CRTC whose primary plane was just enabled 5750 * @new_crtc_state: the enabling state 5751 * 5752 * Performs potentially sleeping operations that must be done after the primary 5753 * plane is enabled, such as updating FBC and IPS. Note that this may be 5754 * called due to an explicit primary plane update, or due to an implicit 5755 * re-enable that is caused when a sprite plane is updated to no longer 5756 * completely hide the primary plane. 5757 */ 5758 static void 5759 intel_post_enable_primary(struct drm_crtc *crtc, 5760 const struct intel_crtc_state *new_crtc_state) 5761 { 5762 struct drm_device *dev = crtc->dev; 5763 struct drm_i915_private *dev_priv = to_i915(dev); 5764 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5765 int pipe = intel_crtc->pipe; 5766 5767 /* 5768 * Gen2 reports pipe underruns whenever all planes are disabled. 5769 * So don't enable underrun reporting before at least some planes 5770 * are enabled. 5771 * FIXME: Need to fix the logic to work when we turn off all planes 5772 * but leave the pipe running. 5773 */ 5774 if (IS_GEN(dev_priv, 2)) 5775 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5776 5777 /* Underruns don't always raise interrupts, so check manually. */ 5778 intel_check_cpu_fifo_underruns(dev_priv); 5779 intel_check_pch_fifo_underruns(dev_priv); 5780 } 5781 5782 /* FIXME get rid of this and use pre_plane_update */ 5783 static void 5784 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) 5785 { 5786 struct drm_device *dev = crtc->dev; 5787 struct drm_i915_private *dev_priv = to_i915(dev); 5788 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5789 int pipe = intel_crtc->pipe; 5790 5791 /* 5792 * Gen2 reports pipe underruns whenever all planes are disabled. 5793 * So disable underrun reporting before all the planes get disabled. 5794 */ 5795 if (IS_GEN(dev_priv, 2)) 5796 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5797 5798 hsw_disable_ips(to_intel_crtc_state(crtc->state)); 5799 5800 /* 5801 * Vblank time updates from the shadow to live plane control register 5802 * are blocked if the memory self-refresh mode is active at that 5803 * moment. So to make sure the plane gets truly disabled, disable 5804 * first the self-refresh mode. The self-refresh enable bit in turn 5805 * will be checked/applied by the HW only at the next frame start 5806 * event which is after the vblank start event, so we need to have a 5807 * wait-for-vblank between disabling the plane and the pipe. 5808 */ 5809 if (HAS_GMCH(dev_priv) && 5810 intel_set_memory_cxsr(dev_priv, false)) 5811 intel_wait_for_vblank(dev_priv, pipe); 5812 } 5813 5814 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state, 5815 const struct intel_crtc_state *new_crtc_state) 5816 { 5817 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 5818 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5819 5820 if (!old_crtc_state->ips_enabled) 5821 return false; 5822 5823 if (needs_modeset(new_crtc_state)) 5824 return true; 5825 5826 /* 5827 * Workaround : Do not read or write the pipe palette/gamma data while 5828 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 5829 * 5830 * Disable IPS before we program the LUT. 
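 * A sketch of the intended ordering on HSW with split gamma (using the
 * helpers defined in this file; the exact call sites live in the
 * atomic commit path):
 *
 *   hsw_disable_ips(old_crtc_state);        from intel_pre_plane_update()
 *   intel_color_load_luts(new_crtc_state);
 *   intel_color_commit(new_crtc_state);
 *   hsw_enable_ips(new_crtc_state);         from intel_post_plane_update()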
5831 */ 5832 if (IS_HASWELL(dev_priv) && 5833 (new_crtc_state->base.color_mgmt_changed || 5834 new_crtc_state->update_pipe) && 5835 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 5836 return true; 5837 5838 return !new_crtc_state->ips_enabled; 5839 } 5840 5841 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state, 5842 const struct intel_crtc_state *new_crtc_state) 5843 { 5844 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 5845 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5846 5847 if (!new_crtc_state->ips_enabled) 5848 return false; 5849 5850 if (needs_modeset(new_crtc_state)) 5851 return true; 5852 5853 /* 5854 * Workaround : Do not read or write the pipe palette/gamma data while 5855 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 5856 * 5857 * Re-enable IPS after the LUT has been programmed. 5858 */ 5859 if (IS_HASWELL(dev_priv) && 5860 (new_crtc_state->base.color_mgmt_changed || 5861 new_crtc_state->update_pipe) && 5862 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 5863 return true; 5864 5865 /* 5866 * We can't read out IPS on broadwell, assume the worst and 5867 * forcibly enable IPS on the first fastset. 5868 */ 5869 if (new_crtc_state->update_pipe && 5870 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED) 5871 return true; 5872 5873 return !old_crtc_state->ips_enabled; 5874 } 5875 5876 static bool needs_nv12_wa(struct drm_i915_private *dev_priv, 5877 const struct intel_crtc_state *crtc_state) 5878 { 5879 if (!crtc_state->nv12_planes) 5880 return false; 5881 5882 /* WA Display #0827: Gen9:all */ 5883 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) 5884 return true; 5885 5886 return false; 5887 } 5888 5889 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv, 5890 const struct intel_crtc_state *crtc_state) 5891 { 5892 /* Wa_2006604312:icl */ 5893 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv)) 5894 return true; 5895 5896 return false; 5897 } 5898 5899 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) 5900 { 5901 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 5902 struct drm_device *dev = crtc->base.dev; 5903 struct drm_i915_private *dev_priv = to_i915(dev); 5904 struct drm_atomic_state *state = old_crtc_state->base.state; 5905 struct intel_crtc_state *pipe_config = 5906 intel_atomic_get_new_crtc_state(to_intel_atomic_state(state), 5907 crtc); 5908 struct drm_plane *primary = crtc->base.primary; 5909 struct drm_plane_state *old_primary_state = 5910 drm_atomic_get_old_plane_state(state, primary); 5911 5912 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); 5913 5914 if (pipe_config->update_wm_post && pipe_config->base.active) 5915 intel_update_watermarks(crtc); 5916 5917 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config)) 5918 hsw_enable_ips(pipe_config); 5919 5920 if (old_primary_state) { 5921 struct drm_plane_state *new_primary_state = 5922 drm_atomic_get_new_plane_state(state, primary); 5923 5924 intel_fbc_post_update(crtc); 5925 5926 if (new_primary_state->visible && 5927 (needs_modeset(pipe_config) || 5928 !old_primary_state->visible)) 5929 intel_post_enable_primary(&crtc->base, pipe_config); 5930 } 5931 5932 if (needs_nv12_wa(dev_priv, old_crtc_state) && 5933 !needs_nv12_wa(dev_priv, pipe_config)) 5934 skl_wa_827(dev_priv, crtc->pipe, false); 5935 5936 if (needs_scalerclk_wa(dev_priv, old_crtc_state) && 5937 
!needs_scalerclk_wa(dev_priv, pipe_config)) 5938 icl_wa_scalerclkgating(dev_priv, crtc->pipe, false); 5939 } 5940 5941 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, 5942 struct intel_crtc_state *pipe_config) 5943 { 5944 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 5945 struct drm_device *dev = crtc->base.dev; 5946 struct drm_i915_private *dev_priv = to_i915(dev); 5947 struct drm_atomic_state *state = old_crtc_state->base.state; 5948 struct drm_plane *primary = crtc->base.primary; 5949 struct drm_plane_state *old_primary_state = 5950 drm_atomic_get_old_plane_state(state, primary); 5951 bool modeset = needs_modeset(pipe_config); 5952 struct intel_atomic_state *intel_state = 5953 to_intel_atomic_state(state); 5954 5955 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config)) 5956 hsw_disable_ips(old_crtc_state); 5957 5958 if (old_primary_state) { 5959 struct intel_plane_state *new_primary_state = 5960 intel_atomic_get_new_plane_state(intel_state, 5961 to_intel_plane(primary)); 5962 5963 intel_fbc_pre_update(crtc, pipe_config, new_primary_state); 5964 /* 5965 * Gen2 reports pipe underruns whenever all planes are disabled. 5966 * So disable underrun reporting before all the planes get disabled. 5967 */ 5968 if (IS_GEN(dev_priv, 2) && old_primary_state->visible && 5969 (modeset || !new_primary_state->base.visible)) 5970 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5971 } 5972 5973 /* Display WA 827 */ 5974 if (!needs_nv12_wa(dev_priv, old_crtc_state) && 5975 needs_nv12_wa(dev_priv, pipe_config)) 5976 skl_wa_827(dev_priv, crtc->pipe, true); 5977 5978 /* Wa_2006604312:icl */ 5979 if (!needs_scalerclk_wa(dev_priv, old_crtc_state) && 5980 needs_scalerclk_wa(dev_priv, pipe_config)) 5981 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true); 5982 5983 /* 5984 * Vblank time updates from the shadow to live plane control register 5985 * are blocked if the memory self-refresh mode is active at that 5986 * moment. So to make sure the plane gets truly disabled, disable 5987 * first the self-refresh mode. The self-refresh enable bit in turn 5988 * will be checked/applied by the HW only at the next frame start 5989 * event which is after the vblank start event, so we need to have a 5990 * wait-for-vblank between disabling the plane and the pipe. 5991 */ 5992 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active && 5993 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 5994 intel_wait_for_vblank(dev_priv, crtc->pipe); 5995 5996 /* 5997 * IVB workaround: must disable low power watermarks for at least 5998 * one frame before enabling scaling. LP watermarks can be re-enabled 5999 * when scaling is disabled. 6000 * 6001 * WaCxSRDisabledForSpriteScaling:ivb 6002 */ 6003 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) && 6004 old_crtc_state->base.active) 6005 intel_wait_for_vblank(dev_priv, crtc->pipe); 6006 6007 /* 6008 * If we're doing a modeset, we're done. No need to do any pre-vblank 6009 * watermark programming here. 6010 */ 6011 if (needs_modeset(pipe_config)) 6012 return; 6013 6014 /* 6015 * For platforms that support atomic watermarks, program the 6016 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 6017 * will be the intermediate values that are safe for both pre- and 6018 * post- vblank; when vblank happens, the 'active' values will be set 6019 * to the final 'target' values and we'll do this again to get the 6020 * optimal watermarks. 
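 * As a sketch of the pre-gen9 flow (the optimize step runs later in
 * the commit, once the vblank has passed):
 *
 *   dev_priv->display.initial_watermarks(intel_state, pipe_config);
 *   ... vblank ...
 *   dev_priv->display.optimize_watermarks(intel_state, pipe_config);
 *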
For gen9+ platforms, the values we program here 6021 * will be the final target values which will get automatically latched 6022 * at vblank time; no further programming will be necessary. 6023 * 6024 * If a platform hasn't been transitioned to atomic watermarks yet, 6025 * we'll continue to update watermarks the old way, if flags tell 6026 * us to. 6027 */ 6028 if (dev_priv->display.initial_watermarks != NULL) 6029 dev_priv->display.initial_watermarks(intel_state, 6030 pipe_config); 6031 else if (pipe_config->update_wm_pre) 6032 intel_update_watermarks(crtc); 6033 } 6034 6035 static void intel_crtc_disable_planes(struct intel_atomic_state *state, 6036 struct intel_crtc *crtc) 6037 { 6038 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6039 const struct intel_crtc_state *new_crtc_state = 6040 intel_atomic_get_new_crtc_state(state, crtc); 6041 unsigned int update_mask = new_crtc_state->update_planes; 6042 const struct intel_plane_state *old_plane_state; 6043 struct intel_plane *plane; 6044 unsigned fb_bits = 0; 6045 int i; 6046 6047 intel_crtc_dpms_overlay_disable(crtc); 6048 6049 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 6050 if (crtc->pipe != plane->pipe || 6051 !(update_mask & BIT(plane->id))) 6052 continue; 6053 6054 intel_disable_plane(plane, new_crtc_state); 6055 6056 if (old_plane_state->base.visible) 6057 fb_bits |= plane->frontbuffer_bit; 6058 } 6059 6060 intel_frontbuffer_flip(dev_priv, fb_bits); 6061 } 6062 6063 /* 6064 * intel_connector_primary_encoder - get the primary encoder for a connector 6065 * @connector: connector for which to return the encoder 6066 * 6067 * Returns the primary encoder for a connector. There is a 1:1 mapping from 6068 * all connectors to their encoder, except for DP-MST connectors which have 6069 * both a virtual and a primary encoder. These DP-MST primary encoders can be 6070 * pointed to by as many DP-MST connectors as there are pipes. 6071 */ 6072 static struct intel_encoder * 6073 intel_connector_primary_encoder(struct intel_connector *connector) 6074 { 6075 struct intel_encoder *encoder; 6076 6077 if (connector->mst_port) 6078 return &dp_to_dig_port(connector->mst_port)->base; 6079 6080 encoder = intel_attached_encoder(&connector->base); 6081 WARN_ON(!encoder); 6082 6083 return encoder; 6084 } 6085 6086 static bool 6087 intel_connector_needs_modeset(struct intel_atomic_state *state, 6088 const struct drm_connector_state *old_conn_state, 6089 const struct drm_connector_state *new_conn_state) 6090 { 6091 struct intel_crtc *old_crtc = old_conn_state->crtc ? 6092 to_intel_crtc(old_conn_state->crtc) : NULL; 6093 struct intel_crtc *new_crtc = new_conn_state->crtc ? 
6094 to_intel_crtc(new_conn_state->crtc) : NULL; 6095 6096 return new_crtc != old_crtc || 6097 (new_crtc && 6098 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc))); 6099 } 6100 6101 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 6102 { 6103 struct drm_connector_state *old_conn_state; 6104 struct drm_connector_state *new_conn_state; 6105 struct drm_connector *conn; 6106 int i; 6107 6108 for_each_oldnew_connector_in_state(&state->base, conn, 6109 old_conn_state, new_conn_state, i) { 6110 struct intel_encoder *encoder; 6111 struct intel_crtc *crtc; 6112 6113 if (!intel_connector_needs_modeset(state, 6114 old_conn_state, 6115 new_conn_state)) 6116 continue; 6117 6118 encoder = intel_connector_primary_encoder(to_intel_connector(conn)); 6119 if (!encoder->update_prepare) 6120 continue; 6121 6122 crtc = new_conn_state->crtc ? 6123 to_intel_crtc(new_conn_state->crtc) : NULL; 6124 encoder->update_prepare(state, encoder, crtc); 6125 } 6126 } 6127 6128 static void intel_encoders_update_complete(struct intel_atomic_state *state) 6129 { 6130 struct drm_connector_state *old_conn_state; 6131 struct drm_connector_state *new_conn_state; 6132 struct drm_connector *conn; 6133 int i; 6134 6135 for_each_oldnew_connector_in_state(&state->base, conn, 6136 old_conn_state, new_conn_state, i) { 6137 struct intel_encoder *encoder; 6138 struct intel_crtc *crtc; 6139 6140 if (!intel_connector_needs_modeset(state, 6141 old_conn_state, 6142 new_conn_state)) 6143 continue; 6144 6145 encoder = intel_connector_primary_encoder(to_intel_connector(conn)); 6146 if (!encoder->update_complete) 6147 continue; 6148 6149 crtc = new_conn_state->crtc ? 6150 to_intel_crtc(new_conn_state->crtc) : NULL; 6151 encoder->update_complete(state, encoder, crtc); 6152 } 6153 } 6154 6155 static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc, 6156 struct intel_crtc_state *crtc_state, 6157 struct intel_atomic_state *state) 6158 { 6159 struct drm_connector_state *conn_state; 6160 struct drm_connector *conn; 6161 int i; 6162 6163 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6164 struct intel_encoder *encoder = 6165 to_intel_encoder(conn_state->best_encoder); 6166 6167 if (conn_state->crtc != &crtc->base) 6168 continue; 6169 6170 if (encoder->pre_pll_enable) 6171 encoder->pre_pll_enable(encoder, crtc_state, conn_state); 6172 } 6173 } 6174 6175 static void intel_encoders_pre_enable(struct intel_crtc *crtc, 6176 struct intel_crtc_state *crtc_state, 6177 struct intel_atomic_state *state) 6178 { 6179 struct drm_connector_state *conn_state; 6180 struct drm_connector *conn; 6181 int i; 6182 6183 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6184 struct intel_encoder *encoder = 6185 to_intel_encoder(conn_state->best_encoder); 6186 6187 if (conn_state->crtc != &crtc->base) 6188 continue; 6189 6190 if (encoder->pre_enable) 6191 encoder->pre_enable(encoder, crtc_state, conn_state); 6192 } 6193 } 6194 6195 static void intel_encoders_enable(struct intel_crtc *crtc, 6196 struct intel_crtc_state *crtc_state, 6197 struct intel_atomic_state *state) 6198 { 6199 struct drm_connector_state *conn_state; 6200 struct drm_connector *conn; 6201 int i; 6202 6203 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6204 struct intel_encoder *encoder = 6205 to_intel_encoder(conn_state->best_encoder); 6206 6207 if (conn_state->crtc != &crtc->base) 6208 continue; 6209 6210 if (encoder->enable) 6211 encoder->enable(encoder, crtc_state, conn_state); 6212 
intel_opregion_notify_encoder(encoder, true); 6213 } 6214 } 6215 6216 static void intel_encoders_disable(struct intel_crtc *crtc, 6217 struct intel_crtc_state *old_crtc_state, 6218 struct intel_atomic_state *state) 6219 { 6220 struct drm_connector_state *old_conn_state; 6221 struct drm_connector *conn; 6222 int i; 6223 6224 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6225 struct intel_encoder *encoder = 6226 to_intel_encoder(old_conn_state->best_encoder); 6227 6228 if (old_conn_state->crtc != &crtc->base) 6229 continue; 6230 6231 intel_opregion_notify_encoder(encoder, false); 6232 if (encoder->disable) 6233 encoder->disable(encoder, old_crtc_state, old_conn_state); 6234 } 6235 } 6236 6237 static void intel_encoders_post_disable(struct intel_crtc *crtc, 6238 struct intel_crtc_state *old_crtc_state, 6239 struct intel_atomic_state *state) 6240 { 6241 struct drm_connector_state *old_conn_state; 6242 struct drm_connector *conn; 6243 int i; 6244 6245 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6246 struct intel_encoder *encoder = 6247 to_intel_encoder(old_conn_state->best_encoder); 6248 6249 if (old_conn_state->crtc != &crtc->base) 6250 continue; 6251 6252 if (encoder->post_disable) 6253 encoder->post_disable(encoder, old_crtc_state, old_conn_state); 6254 } 6255 } 6256 6257 static void intel_encoders_post_pll_disable(struct intel_crtc *crtc, 6258 struct intel_crtc_state *old_crtc_state, 6259 struct intel_atomic_state *state) 6260 { 6261 struct drm_connector_state *old_conn_state; 6262 struct drm_connector *conn; 6263 int i; 6264 6265 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6266 struct intel_encoder *encoder = 6267 to_intel_encoder(old_conn_state->best_encoder); 6268 6269 if (old_conn_state->crtc != &crtc->base) 6270 continue; 6271 6272 if (encoder->post_pll_disable) 6273 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state); 6274 } 6275 } 6276 6277 static void intel_encoders_update_pipe(struct intel_crtc *crtc, 6278 struct intel_crtc_state *crtc_state, 6279 struct intel_atomic_state *state) 6280 { 6281 struct drm_connector_state *conn_state; 6282 struct drm_connector *conn; 6283 int i; 6284 6285 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6286 struct intel_encoder *encoder = 6287 to_intel_encoder(conn_state->best_encoder); 6288 6289 if (conn_state->crtc != &crtc->base) 6290 continue; 6291 6292 if (encoder->update_pipe) 6293 encoder->update_pipe(encoder, crtc_state, conn_state); 6294 } 6295 } 6296 6297 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) 6298 { 6299 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6300 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 6301 6302 plane->disable_plane(plane, crtc_state); 6303 } 6304 6305 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, 6306 struct intel_atomic_state *state) 6307 { 6308 struct drm_crtc *crtc = pipe_config->base.crtc; 6309 struct drm_device *dev = crtc->dev; 6310 struct drm_i915_private *dev_priv = to_i915(dev); 6311 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6312 int pipe = intel_crtc->pipe; 6313 6314 if (WARN_ON(intel_crtc->active)) 6315 return; 6316 6317 /* 6318 * Sometimes spurious CPU pipe underruns happen during FDI 6319 * training, at least with VGA+HDMI cloning. Suppress them. 6320 * 6321 * On ILK we get an occasional spurious CPU pipe underruns 6322 * between eDP port A enable and vdd enable. 
Also PCH port 6323 * enable seems to result in the occasional CPU pipe underrun. 6324 * 6325 * Spurious PCH underruns also occur during PCH enabling. 6326 */ 6327 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6328 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6329 6330 if (pipe_config->has_pch_encoder) 6331 intel_prepare_shared_dpll(pipe_config); 6332 6333 if (intel_crtc_has_dp_encoder(pipe_config)) 6334 intel_dp_set_m_n(pipe_config, M1_N1); 6335 6336 intel_set_pipe_timings(pipe_config); 6337 intel_set_pipe_src_size(pipe_config); 6338 6339 if (pipe_config->has_pch_encoder) { 6340 intel_cpu_transcoder_set_m_n(pipe_config, 6341 &pipe_config->fdi_m_n, NULL); 6342 } 6343 6344 ironlake_set_pipeconf(pipe_config); 6345 6346 intel_crtc->active = true; 6347 6348 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6349 6350 if (pipe_config->has_pch_encoder) { 6351 /* Note: FDI PLL enabling _must_ be done before we enable the 6352 * cpu pipes, hence this is separate from all the other fdi/pch 6353 * enabling. */ 6354 ironlake_fdi_pll_enable(pipe_config); 6355 } else { 6356 assert_fdi_tx_disabled(dev_priv, pipe); 6357 assert_fdi_rx_disabled(dev_priv, pipe); 6358 } 6359 6360 ironlake_pfit_enable(pipe_config); 6361 6362 /* 6363 * On ILK+ LUT must be loaded before the pipe is running but with 6364 * clocks enabled 6365 */ 6366 intel_color_load_luts(pipe_config); 6367 intel_color_commit(pipe_config); 6368 /* update DSPCNTR to configure gamma for pipe bottom color */ 6369 intel_disable_primary_plane(pipe_config); 6370 6371 if (dev_priv->display.initial_watermarks != NULL) 6372 dev_priv->display.initial_watermarks(state, pipe_config); 6373 intel_enable_pipe(pipe_config); 6374 6375 if (pipe_config->has_pch_encoder) 6376 ironlake_pch_enable(state, pipe_config); 6377 6378 assert_vblank_disabled(crtc); 6379 intel_crtc_vblank_on(pipe_config); 6380 6381 intel_encoders_enable(intel_crtc, pipe_config, state); 6382 6383 if (HAS_PCH_CPT(dev_priv)) 6384 cpt_verify_modeset(dev, intel_crtc->pipe); 6385 6386 /* 6387 * Must wait for vblank to avoid spurious PCH FIFO underruns. 6388 * And a second vblank wait is needed at least on ILK with 6389 * some interlaced HDMI modes. Let's do the double wait always 6390 * in case there are more corner cases we don't know about. 6391 */ 6392 if (pipe_config->has_pch_encoder) { 6393 intel_wait_for_vblank(dev_priv, pipe); 6394 intel_wait_for_vblank(dev_priv, pipe); 6395 } 6396 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6397 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6398 } 6399 6400 /* IPS only exists on ULT machines and is tied to pipe A. 
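 * (Hence the HAS_IPS() && pipe == PIPE_A check in the helper below;
 * the additional runtime constraints are in hsw_crtc_state_ips_capable().)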
*/ 6401 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 6402 { 6403 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; 6404 } 6405 6406 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, 6407 enum pipe pipe, bool apply) 6408 { 6409 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe)); 6410 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 6411 6412 if (apply) 6413 val |= mask; 6414 else 6415 val &= ~mask; 6416 6417 I915_WRITE(CLKGATE_DIS_PSL(pipe), val); 6418 } 6419 6420 static void icl_pipe_mbus_enable(struct intel_crtc *crtc) 6421 { 6422 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6423 enum pipe pipe = crtc->pipe; 6424 u32 val; 6425 6426 val = MBUS_DBOX_A_CREDIT(2); 6427 6428 if (INTEL_GEN(dev_priv) >= 12) { 6429 val |= MBUS_DBOX_BW_CREDIT(2); 6430 val |= MBUS_DBOX_B_CREDIT(12); 6431 } else { 6432 val |= MBUS_DBOX_BW_CREDIT(1); 6433 val |= MBUS_DBOX_B_CREDIT(8); 6434 } 6435 6436 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); 6437 } 6438 6439 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, 6440 struct intel_atomic_state *state) 6441 { 6442 struct drm_crtc *crtc = pipe_config->base.crtc; 6443 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 6444 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6445 int pipe = intel_crtc->pipe, hsw_workaround_pipe; 6446 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 6447 bool psl_clkgate_wa; 6448 6449 if (WARN_ON(intel_crtc->active)) 6450 return; 6451 6452 intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); 6453 6454 if (pipe_config->shared_dpll) 6455 intel_enable_shared_dpll(pipe_config); 6456 6457 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6458 6459 if (intel_crtc_has_dp_encoder(pipe_config)) 6460 intel_dp_set_m_n(pipe_config, M1_N1); 6461 6462 if (!transcoder_is_dsi(cpu_transcoder)) 6463 intel_set_pipe_timings(pipe_config); 6464 6465 intel_set_pipe_src_size(pipe_config); 6466 6467 if (cpu_transcoder != TRANSCODER_EDP && 6468 !transcoder_is_dsi(cpu_transcoder)) { 6469 I915_WRITE(PIPE_MULT(cpu_transcoder), 6470 pipe_config->pixel_multiplier - 1); 6471 } 6472 6473 if (pipe_config->has_pch_encoder) { 6474 intel_cpu_transcoder_set_m_n(pipe_config, 6475 &pipe_config->fdi_m_n, NULL); 6476 } 6477 6478 if (!transcoder_is_dsi(cpu_transcoder)) 6479 haswell_set_pipeconf(pipe_config); 6480 6481 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 6482 bdw_set_pipemisc(pipe_config); 6483 6484 intel_crtc->active = true; 6485 6486 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ 6487 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 6488 pipe_config->pch_pfit.enabled; 6489 if (psl_clkgate_wa) 6490 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 6491 6492 if (INTEL_GEN(dev_priv) >= 9) 6493 skylake_pfit_enable(pipe_config); 6494 else 6495 ironlake_pfit_enable(pipe_config); 6496 6497 /* 6498 * On ILK+ LUT must be loaded before the pipe is running but with 6499 * clocks enabled 6500 */ 6501 intel_color_load_luts(pipe_config); 6502 intel_color_commit(pipe_config); 6503 /* update DSPCNTR to configure gamma/csc for pipe bottom color */ 6504 if (INTEL_GEN(dev_priv) < 9) 6505 intel_disable_primary_plane(pipe_config); 6506 6507 if (INTEL_GEN(dev_priv) >= 11) 6508 icl_set_pipe_chicken(intel_crtc); 6509 6510 intel_ddi_set_pipe_settings(pipe_config); 6511 if (!transcoder_is_dsi(cpu_transcoder)) 6512 intel_ddi_enable_transcoder_func(pipe_config); 6513 6514 if 
(dev_priv->display.initial_watermarks != NULL) 6515 dev_priv->display.initial_watermarks(state, pipe_config); 6516 6517 if (INTEL_GEN(dev_priv) >= 11) 6518 icl_pipe_mbus_enable(intel_crtc); 6519 6520 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 6521 if (!transcoder_is_dsi(cpu_transcoder)) 6522 intel_enable_pipe(pipe_config); 6523 6524 if (pipe_config->has_pch_encoder) 6525 lpt_pch_enable(state, pipe_config); 6526 6527 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) 6528 intel_ddi_set_vc_payload_alloc(pipe_config, true); 6529 6530 assert_vblank_disabled(crtc); 6531 intel_crtc_vblank_on(pipe_config); 6532 6533 intel_encoders_enable(intel_crtc, pipe_config, state); 6534 6535 if (psl_clkgate_wa) { 6536 intel_wait_for_vblank(dev_priv, pipe); 6537 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); 6538 } 6539 6540 /* If we change the relative order between pipe/planes enabling, we need 6541 * to change the workaround. */ 6542 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; 6543 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 6544 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 6545 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 6546 } 6547 } 6548 6549 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) 6550 { 6551 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 6552 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6553 enum pipe pipe = crtc->pipe; 6554 6555 /* To avoid upsetting the power well on haswell only disable the pfit if 6556 * it's in use. The hw state code will make sure we get this right. */ 6557 if (old_crtc_state->pch_pfit.enabled) { 6558 I915_WRITE(PF_CTL(pipe), 0); 6559 I915_WRITE(PF_WIN_POS(pipe), 0); 6560 I915_WRITE(PF_WIN_SZ(pipe), 0); 6561 } 6562 } 6563 6564 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, 6565 struct intel_atomic_state *state) 6566 { 6567 struct drm_crtc *crtc = old_crtc_state->base.crtc; 6568 struct drm_device *dev = crtc->dev; 6569 struct drm_i915_private *dev_priv = to_i915(dev); 6570 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6571 int pipe = intel_crtc->pipe; 6572 6573 /* 6574 * Sometimes spurious CPU pipe underruns happen when the 6575 * pipe is already disabled, but FDI RX/TX is still enabled. 6576 * Happens at least with VGA+HDMI cloning. Suppress them. 
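 * Both CPU and PCH underrun reporting are turned back on at the end of
 * this function, once the pipe, FDI and (if used) the PCH transcoder
 * have all been shut down.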
6577 */ 6578 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6579 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6580 6581 intel_encoders_disable(intel_crtc, old_crtc_state, state); 6582 6583 drm_crtc_vblank_off(crtc); 6584 assert_vblank_disabled(crtc); 6585 6586 intel_disable_pipe(old_crtc_state); 6587 6588 ironlake_pfit_disable(old_crtc_state); 6589 6590 if (old_crtc_state->has_pch_encoder) 6591 ironlake_fdi_disable(crtc); 6592 6593 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 6594 6595 if (old_crtc_state->has_pch_encoder) { 6596 ironlake_disable_pch_transcoder(dev_priv, pipe); 6597 6598 if (HAS_PCH_CPT(dev_priv)) { 6599 i915_reg_t reg; 6600 u32 temp; 6601 6602 /* disable TRANS_DP_CTL */ 6603 reg = TRANS_DP_CTL(pipe); 6604 temp = I915_READ(reg); 6605 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 6606 TRANS_DP_PORT_SEL_MASK); 6607 temp |= TRANS_DP_PORT_SEL_NONE; 6608 I915_WRITE(reg, temp); 6609 6610 /* disable DPLL_SEL */ 6611 temp = I915_READ(PCH_DPLL_SEL); 6612 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 6613 I915_WRITE(PCH_DPLL_SEL, temp); 6614 } 6615 6616 ironlake_fdi_pll_disable(intel_crtc); 6617 } 6618 6619 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6620 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6621 } 6622 6623 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, 6624 struct intel_atomic_state *state) 6625 { 6626 struct drm_crtc *crtc = old_crtc_state->base.crtc; 6627 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 6628 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6629 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 6630 6631 intel_encoders_disable(intel_crtc, old_crtc_state, state); 6632 6633 drm_crtc_vblank_off(crtc); 6634 assert_vblank_disabled(crtc); 6635 6636 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 6637 if (!transcoder_is_dsi(cpu_transcoder)) 6638 intel_disable_pipe(old_crtc_state); 6639 6640 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) 6641 intel_ddi_set_vc_payload_alloc(old_crtc_state, false); 6642 6643 if (!transcoder_is_dsi(cpu_transcoder)) 6644 intel_ddi_disable_transcoder_func(old_crtc_state); 6645 6646 intel_dsc_disable(old_crtc_state); 6647 6648 if (INTEL_GEN(dev_priv) >= 9) 6649 skylake_scaler_disable(intel_crtc); 6650 else 6651 ironlake_pfit_disable(old_crtc_state); 6652 6653 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 6654 6655 intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); 6656 } 6657 6658 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 6659 { 6660 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6661 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6662 6663 if (!crtc_state->gmch_pfit.control) 6664 return; 6665 6666 /* 6667 * The panel fitter should only be adjusted whilst the pipe is disabled, 6668 * according to register description and PRM. 6669 */ 6670 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 6671 assert_pipe_disabled(dev_priv, crtc->pipe); 6672 6673 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios); 6674 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control); 6675 6676 /* Border color in case we don't scale up to the full screen. Black by 6677 * default, change to something else for debugging. 
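 * As a debugging aid (hypothetical value, the exact color encoding is
 * platform dependent), writing e.g. 0x00ff0000 below instead of 0
 * would make the unscaled border stand out instead of staying black.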
*/ 6678 I915_WRITE(BCLRPAT(crtc->pipe), 0); 6679 } 6680 6681 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 6682 { 6683 if (phy == PHY_NONE) 6684 return false; 6685 6686 if (IS_ELKHARTLAKE(dev_priv) || INTEL_GEN(dev_priv) >= 12) 6687 return phy <= PHY_C; 6688 6689 if (INTEL_GEN(dev_priv) >= 11) 6690 return phy <= PHY_B; 6691 6692 return false; 6693 } 6694 6695 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 6696 { 6697 if (INTEL_GEN(dev_priv) >= 12) 6698 return phy >= PHY_D && phy <= PHY_I; 6699 6700 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) 6701 return phy >= PHY_C && phy <= PHY_F; 6702 6703 return false; 6704 } 6705 6706 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 6707 { 6708 if (IS_ELKHARTLAKE(i915) && port == PORT_D) 6709 return PHY_A; 6710 6711 return (enum phy)port; 6712 } 6713 6714 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 6715 { 6716 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 6717 return PORT_TC_NONE; 6718 6719 if (INTEL_GEN(dev_priv) >= 12) 6720 return port - PORT_D; 6721 6722 return port - PORT_C; 6723 } 6724 6725 enum intel_display_power_domain intel_port_to_power_domain(enum port port) 6726 { 6727 switch (port) { 6728 case PORT_A: 6729 return POWER_DOMAIN_PORT_DDI_A_LANES; 6730 case PORT_B: 6731 return POWER_DOMAIN_PORT_DDI_B_LANES; 6732 case PORT_C: 6733 return POWER_DOMAIN_PORT_DDI_C_LANES; 6734 case PORT_D: 6735 return POWER_DOMAIN_PORT_DDI_D_LANES; 6736 case PORT_E: 6737 return POWER_DOMAIN_PORT_DDI_E_LANES; 6738 case PORT_F: 6739 return POWER_DOMAIN_PORT_DDI_F_LANES; 6740 default: 6741 MISSING_CASE(port); 6742 return POWER_DOMAIN_PORT_OTHER; 6743 } 6744 } 6745 6746 enum intel_display_power_domain 6747 intel_aux_power_domain(struct intel_digital_port *dig_port) 6748 { 6749 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 6750 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 6751 6752 if (intel_phy_is_tc(dev_priv, phy) && 6753 dig_port->tc_mode == TC_PORT_TBT_ALT) { 6754 switch (dig_port->aux_ch) { 6755 case AUX_CH_C: 6756 return POWER_DOMAIN_AUX_TBT1; 6757 case AUX_CH_D: 6758 return POWER_DOMAIN_AUX_TBT2; 6759 case AUX_CH_E: 6760 return POWER_DOMAIN_AUX_TBT3; 6761 case AUX_CH_F: 6762 return POWER_DOMAIN_AUX_TBT4; 6763 default: 6764 MISSING_CASE(dig_port->aux_ch); 6765 return POWER_DOMAIN_AUX_TBT1; 6766 } 6767 } 6768 6769 switch (dig_port->aux_ch) { 6770 case AUX_CH_A: 6771 return POWER_DOMAIN_AUX_A; 6772 case AUX_CH_B: 6773 return POWER_DOMAIN_AUX_B; 6774 case AUX_CH_C: 6775 return POWER_DOMAIN_AUX_C; 6776 case AUX_CH_D: 6777 return POWER_DOMAIN_AUX_D; 6778 case AUX_CH_E: 6779 return POWER_DOMAIN_AUX_E; 6780 case AUX_CH_F: 6781 return POWER_DOMAIN_AUX_F; 6782 default: 6783 MISSING_CASE(dig_port->aux_ch); 6784 return POWER_DOMAIN_AUX_A; 6785 } 6786 } 6787 6788 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) 6789 { 6790 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6791 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6792 struct drm_encoder *encoder; 6793 enum pipe pipe = crtc->pipe; 6794 u64 mask; 6795 enum transcoder transcoder = crtc_state->cpu_transcoder; 6796 6797 if (!crtc_state->base.active) 6798 return 0; 6799 6800 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe)); 6801 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder)); 6802 if (crtc_state->pch_pfit.enabled || 6803 crtc_state->pch_pfit.force_thru) 6804 mask |= 
BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 6805 6806 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 6807 crtc_state->base.encoder_mask) { 6808 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 6809 6810 mask |= BIT_ULL(intel_encoder->power_domain); 6811 } 6812 6813 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 6814 mask |= BIT_ULL(POWER_DOMAIN_AUDIO); 6815 6816 if (crtc_state->shared_dpll) 6817 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE); 6818 6819 return mask; 6820 } 6821 6822 static u64 6823 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) 6824 { 6825 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6826 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6827 enum intel_display_power_domain domain; 6828 u64 domains, new_domains, old_domains; 6829 6830 old_domains = crtc->enabled_power_domains; 6831 crtc->enabled_power_domains = new_domains = 6832 get_crtc_power_domains(crtc_state); 6833 6834 domains = new_domains & ~old_domains; 6835 6836 for_each_power_domain(domain, domains) 6837 intel_display_power_get(dev_priv, domain); 6838 6839 return old_domains & ~new_domains; 6840 } 6841 6842 static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 6843 u64 domains) 6844 { 6845 enum intel_display_power_domain domain; 6846 6847 for_each_power_domain(domain, domains) 6848 intel_display_power_put_unchecked(dev_priv, domain); 6849 } 6850 6851 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, 6852 struct intel_atomic_state *state) 6853 { 6854 struct drm_crtc *crtc = pipe_config->base.crtc; 6855 struct drm_device *dev = crtc->dev; 6856 struct drm_i915_private *dev_priv = to_i915(dev); 6857 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6858 int pipe = intel_crtc->pipe; 6859 6860 if (WARN_ON(intel_crtc->active)) 6861 return; 6862 6863 if (intel_crtc_has_dp_encoder(pipe_config)) 6864 intel_dp_set_m_n(pipe_config, M1_N1); 6865 6866 intel_set_pipe_timings(pipe_config); 6867 intel_set_pipe_src_size(pipe_config); 6868 6869 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 6870 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 6871 I915_WRITE(CHV_CANVAS(pipe), 0); 6872 } 6873 6874 i9xx_set_pipeconf(pipe_config); 6875 6876 intel_crtc->active = true; 6877 6878 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6879 6880 intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); 6881 6882 if (IS_CHERRYVIEW(dev_priv)) { 6883 chv_prepare_pll(intel_crtc, pipe_config); 6884 chv_enable_pll(intel_crtc, pipe_config); 6885 } else { 6886 vlv_prepare_pll(intel_crtc, pipe_config); 6887 vlv_enable_pll(intel_crtc, pipe_config); 6888 } 6889 6890 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6891 6892 i9xx_pfit_enable(pipe_config); 6893 6894 intel_color_load_luts(pipe_config); 6895 intel_color_commit(pipe_config); 6896 /* update DSPCNTR to configure gamma for pipe bottom color */ 6897 intel_disable_primary_plane(pipe_config); 6898 6899 dev_priv->display.initial_watermarks(state, pipe_config); 6900 intel_enable_pipe(pipe_config); 6901 6902 assert_vblank_disabled(crtc); 6903 intel_crtc_vblank_on(pipe_config); 6904 6905 intel_encoders_enable(intel_crtc, pipe_config, state); 6906 } 6907 6908 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) 6909 { 6910 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6911 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6912 6913 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0); 6914 
I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1); 6915 } 6916 6917 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, 6918 struct intel_atomic_state *state) 6919 { 6920 struct drm_crtc *crtc = pipe_config->base.crtc; 6921 struct drm_device *dev = crtc->dev; 6922 struct drm_i915_private *dev_priv = to_i915(dev); 6923 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6924 enum pipe pipe = intel_crtc->pipe; 6925 6926 if (WARN_ON(intel_crtc->active)) 6927 return; 6928 6929 i9xx_set_pll_dividers(pipe_config); 6930 6931 if (intel_crtc_has_dp_encoder(pipe_config)) 6932 intel_dp_set_m_n(pipe_config, M1_N1); 6933 6934 intel_set_pipe_timings(pipe_config); 6935 intel_set_pipe_src_size(pipe_config); 6936 6937 i9xx_set_pipeconf(pipe_config); 6938 6939 intel_crtc->active = true; 6940 6941 if (!IS_GEN(dev_priv, 2)) 6942 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6943 6944 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6945 6946 i9xx_enable_pll(intel_crtc, pipe_config); 6947 6948 i9xx_pfit_enable(pipe_config); 6949 6950 intel_color_load_luts(pipe_config); 6951 intel_color_commit(pipe_config); 6952 /* update DSPCNTR to configure gamma for pipe bottom color */ 6953 intel_disable_primary_plane(pipe_config); 6954 6955 if (dev_priv->display.initial_watermarks != NULL) 6956 dev_priv->display.initial_watermarks(state, 6957 pipe_config); 6958 else 6959 intel_update_watermarks(intel_crtc); 6960 intel_enable_pipe(pipe_config); 6961 6962 assert_vblank_disabled(crtc); 6963 intel_crtc_vblank_on(pipe_config); 6964 6965 intel_encoders_enable(intel_crtc, pipe_config, state); 6966 } 6967 6968 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 6969 { 6970 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 6971 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6972 6973 if (!old_crtc_state->gmch_pfit.control) 6974 return; 6975 6976 assert_pipe_disabled(dev_priv, crtc->pipe); 6977 6978 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n", 6979 I915_READ(PFIT_CONTROL)); 6980 I915_WRITE(PFIT_CONTROL, 0); 6981 } 6982 6983 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, 6984 struct intel_atomic_state *state) 6985 { 6986 struct drm_crtc *crtc = old_crtc_state->base.crtc; 6987 struct drm_device *dev = crtc->dev; 6988 struct drm_i915_private *dev_priv = to_i915(dev); 6989 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6990 int pipe = intel_crtc->pipe; 6991 6992 /* 6993 * On gen2 planes are double buffered but the pipe isn't, so we must 6994 * wait for planes to fully turn off before disabling the pipe. 
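 * A single intel_wait_for_vblank() below is therefore enough: it lets
 * the double buffered plane registers latch their disabled state
 * before the pipe itself is shut down.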
6995 */ 6996 if (IS_GEN(dev_priv, 2)) 6997 intel_wait_for_vblank(dev_priv, pipe); 6998 6999 intel_encoders_disable(intel_crtc, old_crtc_state, state); 7000 7001 drm_crtc_vblank_off(crtc); 7002 assert_vblank_disabled(crtc); 7003 7004 intel_disable_pipe(old_crtc_state); 7005 7006 i9xx_pfit_disable(old_crtc_state); 7007 7008 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 7009 7010 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 7011 if (IS_CHERRYVIEW(dev_priv)) 7012 chv_disable_pll(dev_priv, pipe); 7013 else if (IS_VALLEYVIEW(dev_priv)) 7014 vlv_disable_pll(dev_priv, pipe); 7015 else 7016 i9xx_disable_pll(old_crtc_state); 7017 } 7018 7019 intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); 7020 7021 if (!IS_GEN(dev_priv, 2)) 7022 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 7023 7024 if (!dev_priv->display.initial_watermarks) 7025 intel_update_watermarks(intel_crtc); 7026 7027 /* clock the pipe down to 640x480@60 to potentially save power */ 7028 if (IS_I830(dev_priv)) 7029 i830_enable_pipe(dev_priv, pipe); 7030 } 7031 7032 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, 7033 struct drm_modeset_acquire_ctx *ctx) 7034 { 7035 struct intel_encoder *encoder; 7036 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7037 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 7038 struct intel_bw_state *bw_state = 7039 to_intel_bw_state(dev_priv->bw_obj.state); 7040 enum intel_display_power_domain domain; 7041 struct intel_plane *plane; 7042 u64 domains; 7043 struct drm_atomic_state *state; 7044 struct intel_crtc_state *crtc_state; 7045 int ret; 7046 7047 if (!intel_crtc->active) 7048 return; 7049 7050 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) { 7051 const struct intel_plane_state *plane_state = 7052 to_intel_plane_state(plane->base.state); 7053 7054 if (plane_state->base.visible) 7055 intel_plane_disable_noatomic(intel_crtc, plane); 7056 } 7057 7058 state = drm_atomic_state_alloc(crtc->dev); 7059 if (!state) { 7060 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", 7061 crtc->base.id, crtc->name); 7062 return; 7063 } 7064 7065 state->acquire_ctx = ctx; 7066 7067 /* Everything's already locked, -EDEADLK can't happen. 
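 * That is also why a plain WARN_ON() on the results below is enough,
 * rather than the usual -EDEADLK backoff-and-retry loop that a normal
 * atomic commit would need.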
*/ 7068 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 7069 ret = drm_atomic_add_affected_connectors(state, crtc); 7070 7071 WARN_ON(IS_ERR(crtc_state) || ret); 7072 7073 dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state)); 7074 7075 drm_atomic_state_put(state); 7076 7077 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", 7078 crtc->base.id, crtc->name); 7079 7080 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); 7081 crtc->state->active = false; 7082 intel_crtc->active = false; 7083 crtc->enabled = false; 7084 crtc->state->connector_mask = 0; 7085 crtc->state->encoder_mask = 0; 7086 7087 for_each_encoder_on_crtc(crtc->dev, crtc, encoder) 7088 encoder->base.crtc = NULL; 7089 7090 intel_fbc_disable(intel_crtc); 7091 intel_update_watermarks(intel_crtc); 7092 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state)); 7093 7094 domains = intel_crtc->enabled_power_domains; 7095 for_each_power_domain(domain, domains) 7096 intel_display_power_put_unchecked(dev_priv, domain); 7097 intel_crtc->enabled_power_domains = 0; 7098 7099 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); 7100 dev_priv->min_cdclk[intel_crtc->pipe] = 0; 7101 dev_priv->min_voltage_level[intel_crtc->pipe] = 0; 7102 7103 bw_state->data_rate[intel_crtc->pipe] = 0; 7104 bw_state->num_active_planes[intel_crtc->pipe] = 0; 7105 } 7106 7107 /* 7108 * Turn all CRTCs off, but do not adjust state. 7109 * This has to be paired with a call to intel_modeset_setup_hw_state. 7110 */ 7111 int intel_display_suspend(struct drm_device *dev) 7112 { 7113 struct drm_i915_private *dev_priv = to_i915(dev); 7114 struct drm_atomic_state *state; 7115 int ret; 7116 7117 state = drm_atomic_helper_suspend(dev); 7118 ret = PTR_ERR_OR_ZERO(state); 7119 if (ret) 7120 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 7121 else 7122 dev_priv->modeset_restore_state = state; 7123 return ret; 7124 } 7125 7126 void intel_encoder_destroy(struct drm_encoder *encoder) 7127 { 7128 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 7129 7130 drm_encoder_cleanup(encoder); 7131 kfree(intel_encoder); 7132 } 7133 7134 /* Cross check the actual hw state with our own modeset state tracking (and its 7135 * internal consistency.
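 * Only the connector -> encoder -> crtc linkage is checked here; the
 * heavier crtc state cross checking is handled separately in the
 * modeset verification code.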
*/ 7136 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, 7137 struct drm_connector_state *conn_state) 7138 { 7139 struct intel_connector *connector = to_intel_connector(conn_state->connector); 7140 7141 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 7142 connector->base.base.id, 7143 connector->base.name); 7144 7145 if (connector->get_hw_state(connector)) { 7146 struct intel_encoder *encoder = connector->encoder; 7147 7148 I915_STATE_WARN(!crtc_state, 7149 "connector enabled without attached crtc\n"); 7150 7151 if (!crtc_state) 7152 return; 7153 7154 I915_STATE_WARN(!crtc_state->base.active, 7155 "connector is active, but attached crtc isn't\n"); 7156 7157 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 7158 return; 7159 7160 I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 7161 "atomic encoder doesn't match attached encoder\n"); 7162 7163 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 7164 "attached encoder crtc differs from connector crtc\n"); 7165 } else { 7166 I915_STATE_WARN(crtc_state && crtc_state->base.active, 7167 "attached crtc is active, but connector isn't\n"); 7168 I915_STATE_WARN(!crtc_state && conn_state->best_encoder, 7169 "best encoder set without crtc!\n"); 7170 } 7171 } 7172 7173 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 7174 { 7175 if (crtc_state->base.enable && crtc_state->has_pch_encoder) 7176 return crtc_state->fdi_lanes; 7177 7178 return 0; 7179 } 7180 7181 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, 7182 struct intel_crtc_state *pipe_config) 7183 { 7184 struct drm_i915_private *dev_priv = to_i915(dev); 7185 struct drm_atomic_state *state = pipe_config->base.state; 7186 struct intel_crtc *other_crtc; 7187 struct intel_crtc_state *other_crtc_state; 7188 7189 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", 7190 pipe_name(pipe), pipe_config->fdi_lanes); 7191 if (pipe_config->fdi_lanes > 4) { 7192 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", 7193 pipe_name(pipe), pipe_config->fdi_lanes); 7194 return -EINVAL; 7195 } 7196 7197 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 7198 if (pipe_config->fdi_lanes > 2) { 7199 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 7200 pipe_config->fdi_lanes); 7201 return -EINVAL; 7202 } else { 7203 return 0; 7204 } 7205 } 7206 7207 if (INTEL_INFO(dev_priv)->num_pipes == 2) 7208 return 0; 7209 7210 /* Ivybridge 3 pipe is really complicated */ 7211 switch (pipe) { 7212 case PIPE_A: 7213 return 0; 7214 case PIPE_B: 7215 if (pipe_config->fdi_lanes <= 2) 7216 return 0; 7217 7218 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); 7219 other_crtc_state = 7220 intel_atomic_get_crtc_state(state, other_crtc); 7221 if (IS_ERR(other_crtc_state)) 7222 return PTR_ERR(other_crtc_state); 7223 7224 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 7225 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", 7226 pipe_name(pipe), pipe_config->fdi_lanes); 7227 return -EINVAL; 7228 } 7229 return 0; 7230 case PIPE_C: 7231 if (pipe_config->fdi_lanes > 2) { 7232 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 7233 pipe_name(pipe), pipe_config->fdi_lanes); 7234 return -EINVAL; 7235 } 7236 7237 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); 7238 other_crtc_state = 7239 intel_atomic_get_crtc_state(state, other_crtc); 7240 if (IS_ERR(other_crtc_state)) 7241 return PTR_ERR(other_crtc_state); 7242 7243 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 7244 
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 7245 return -EINVAL; 7246 } 7247 return 0; 7248 default: 7249 BUG(); 7250 } 7251 } 7252 7253 #define RETRY 1 7254 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, 7255 struct intel_crtc_state *pipe_config) 7256 { 7257 struct drm_device *dev = intel_crtc->base.dev; 7258 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 7259 int lane, link_bw, fdi_dotclock, ret; 7260 bool needs_recompute = false; 7261 7262 retry: 7263 /* FDI is a binary signal running at ~2.7GHz, encoding 7264 * each output octet as 10 bits. The actual frequency 7265 * is stored as a divider into a 100MHz clock, and the 7266 * mode pixel clock is stored in units of 1KHz. 7267 * Hence the bw of each lane in terms of the mode signal 7268 * is: 7269 */ 7270 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config); 7271 7272 fdi_dotclock = adjusted_mode->crtc_clock; 7273 7274 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 7275 pipe_config->pipe_bpp); 7276 7277 pipe_config->fdi_lanes = lane; 7278 7279 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 7280 link_bw, &pipe_config->fdi_m_n, false); 7281 7282 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 7283 if (ret == -EDEADLK) 7284 return ret; 7285 7286 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 7287 pipe_config->pipe_bpp -= 2*3; 7288 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 7289 pipe_config->pipe_bpp); 7290 needs_recompute = true; 7291 pipe_config->bw_constrained = true; 7292 7293 goto retry; 7294 } 7295 7296 if (needs_recompute) 7297 return RETRY; 7298 7299 return ret; 7300 } 7301 7302 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) 7303 { 7304 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7305 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7306 7307 /* IPS only exists on ULT machines and is tied to pipe A. */ 7308 if (!hsw_crtc_supports_ips(crtc)) 7309 return false; 7310 7311 if (!i915_modparams.enable_ips) 7312 return false; 7313 7314 if (crtc_state->pipe_bpp > 24) 7315 return false; 7316 7317 /* 7318 * We compare against max which means we must take 7319 * the increased cdclk requirement into account when 7320 * calculating the new cdclk. 7321 * 7322 * Should measure whether using a lower cdclk w/o IPS 7323 */ 7324 if (IS_BROADWELL(dev_priv) && 7325 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100) 7326 return false; 7327 7328 return true; 7329 } 7330 7331 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state) 7332 { 7333 struct drm_i915_private *dev_priv = 7334 to_i915(crtc_state->base.crtc->dev); 7335 struct intel_atomic_state *intel_state = 7336 to_intel_atomic_state(crtc_state->base.state); 7337 7338 if (!hsw_crtc_state_ips_capable(crtc_state)) 7339 return false; 7340 7341 /* 7342 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 7343 * enabled and disabled dynamically based on package C states, 7344 * user space can't make reliable use of the CRCs, so let's just 7345 * completely disable it. 7346 */ 7347 if (crtc_state->crc_enabled) 7348 return false; 7349 7350 /* IPS should be fine as long as at least one plane is enabled. 
*/ 7351 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) 7352 return false; 7353 7354 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 7355 if (IS_BROADWELL(dev_priv) && 7356 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100) 7357 return false; 7358 7359 return true; 7360 } 7361 7362 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 7363 { 7364 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7365 7366 /* GDG double wide on either pipe, otherwise pipe A only */ 7367 return INTEL_GEN(dev_priv) < 4 && 7368 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 7369 } 7370 7371 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) 7372 { 7373 u32 pixel_rate; 7374 7375 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock; 7376 7377 /* 7378 * We only use IF-ID interlacing. If we ever use 7379 * PF-ID we'll need to adjust the pixel_rate here. 7380 */ 7381 7382 if (pipe_config->pch_pfit.enabled) { 7383 u64 pipe_w, pipe_h, pfit_w, pfit_h; 7384 u32 pfit_size = pipe_config->pch_pfit.size; 7385 7386 pipe_w = pipe_config->pipe_src_w; 7387 pipe_h = pipe_config->pipe_src_h; 7388 7389 pfit_w = (pfit_size >> 16) & 0xFFFF; 7390 pfit_h = pfit_size & 0xFFFF; 7391 if (pipe_w < pfit_w) 7392 pipe_w = pfit_w; 7393 if (pipe_h < pfit_h) 7394 pipe_h = pfit_h; 7395 7396 if (WARN_ON(!pfit_w || !pfit_h)) 7397 return pixel_rate; 7398 7399 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h), 7400 pfit_w * pfit_h); 7401 } 7402 7403 return pixel_rate; 7404 } 7405 7406 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 7407 { 7408 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 7409 7410 if (HAS_GMCH(dev_priv)) 7411 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 7412 crtc_state->pixel_rate = 7413 crtc_state->base.adjusted_mode.crtc_clock; 7414 else 7415 crtc_state->pixel_rate = 7416 ilk_pipe_pixel_rate(crtc_state); 7417 } 7418 7419 static int intel_crtc_compute_config(struct intel_crtc *crtc, 7420 struct intel_crtc_state *pipe_config) 7421 { 7422 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7423 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 7424 int clock_limit = dev_priv->max_dotclk_freq; 7425 7426 if (INTEL_GEN(dev_priv) < 4) { 7427 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 7428 7429 /* 7430 * Enable double wide mode when the dot clock 7431 * is > 90% of the (display) core speed. 7432 */ 7433 if (intel_crtc_supports_double_wide(crtc) && 7434 adjusted_mode->crtc_clock > clock_limit) { 7435 clock_limit = dev_priv->max_dotclk_freq; 7436 pipe_config->double_wide = true; 7437 } 7438 } 7439 7440 if (adjusted_mode->crtc_clock > clock_limit) { 7441 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 7442 adjusted_mode->crtc_clock, clock_limit, 7443 yesno(pipe_config->double_wide)); 7444 return -EINVAL; 7445 } 7446 7447 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 7448 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) && 7449 pipe_config->base.ctm) { 7450 /* 7451 * There is only one pipe CSC unit per pipe, and we need that 7452 * for output conversion from RGB->YCBCR. So if CTM is already 7453 * applied we can't support YCBCR420 output. 
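 *
 * The same reasoning covers the YCBCR444 case handled here: both
 * output formats need the single pipe CSC unit for the RGB->YCbCr
 * conversion, so when a CTM is present we reject the configuration
 * rather than silently dropping the user's CTM.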
 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with an hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}

static void
intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void compute_m_n(unsigned int m, unsigned int n,
			u32 *ret_m, u32 *ret_n,
			bool constant_n)
{
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Use 0x8000 as the N value, which
	 * those devices accept. 0x8000 is the specified fixed N value
	 * for asynchronous clock mode, which the devices also expect
	 * in synchronous clock mode.
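	 *
	 * As a worked example (assuming constant_n): a 148.5 MHz pixel
	 * clock on a 2.7 GHz DP link gives
	 * link_m = 148500 * 0x8000 / 270000 = 18022, so the sink
	 * regenerates the stream clock from the ratio 18022 / 32768.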
7513 */ 7514 if (constant_n) 7515 *ret_n = 0x8000; 7516 else 7517 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 7518 7519 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 7520 intel_reduce_m_n_ratio(ret_m, ret_n); 7521 } 7522 7523 void 7524 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 7525 int pixel_clock, int link_clock, 7526 struct intel_link_m_n *m_n, 7527 bool constant_n) 7528 { 7529 m_n->tu = 64; 7530 7531 compute_m_n(bits_per_pixel * pixel_clock, 7532 link_clock * nlanes * 8, 7533 &m_n->gmch_m, &m_n->gmch_n, 7534 constant_n); 7535 7536 compute_m_n(pixel_clock, link_clock, 7537 &m_n->link_m, &m_n->link_n, 7538 constant_n); 7539 } 7540 7541 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 7542 { 7543 if (i915_modparams.panel_use_ssc >= 0) 7544 return i915_modparams.panel_use_ssc != 0; 7545 return dev_priv->vbt.lvds_use_ssc 7546 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 7547 } 7548 7549 static u32 pnv_dpll_compute_fp(struct dpll *dpll) 7550 { 7551 return (1 << dpll->n) << 16 | dpll->m2; 7552 } 7553 7554 static u32 i9xx_dpll_compute_fp(struct dpll *dpll) 7555 { 7556 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 7557 } 7558 7559 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7560 struct intel_crtc_state *crtc_state, 7561 struct dpll *reduced_clock) 7562 { 7563 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7564 u32 fp, fp2 = 0; 7565 7566 if (IS_PINEVIEW(dev_priv)) { 7567 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 7568 if (reduced_clock) 7569 fp2 = pnv_dpll_compute_fp(reduced_clock); 7570 } else { 7571 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 7572 if (reduced_clock) 7573 fp2 = i9xx_dpll_compute_fp(reduced_clock); 7574 } 7575 7576 crtc_state->dpll_hw_state.fp0 = fp; 7577 7578 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 7579 reduced_clock) { 7580 crtc_state->dpll_hw_state.fp1 = fp2; 7581 } else { 7582 crtc_state->dpll_hw_state.fp1 = fp; 7583 } 7584 } 7585 7586 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe 7587 pipe) 7588 { 7589 u32 reg_val; 7590 7591 /* 7592 * PLLB opamp always calibrates to max value of 0x3f, force enable it 7593 * and set it to a reasonable value instead. 
7594 */ 7595 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7596 reg_val &= 0xffffff00; 7597 reg_val |= 0x00000030; 7598 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7599 7600 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7601 reg_val &= 0x00ffffff; 7602 reg_val |= 0x8c000000; 7603 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7604 7605 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7606 reg_val &= 0xffffff00; 7607 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7608 7609 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7610 reg_val &= 0x00ffffff; 7611 reg_val |= 0xb0000000; 7612 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7613 } 7614 7615 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 7616 const struct intel_link_m_n *m_n) 7617 { 7618 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7619 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7620 enum pipe pipe = crtc->pipe; 7621 7622 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7623 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 7624 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 7625 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 7626 } 7627 7628 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 7629 enum transcoder transcoder) 7630 { 7631 if (IS_HASWELL(dev_priv)) 7632 return transcoder == TRANSCODER_EDP; 7633 7634 /* 7635 * Strictly speaking some registers are available before 7636 * gen7, but we only support DRRS on gen7+ 7637 */ 7638 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv); 7639 } 7640 7641 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 7642 const struct intel_link_m_n *m_n, 7643 const struct intel_link_m_n *m2_n2) 7644 { 7645 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7646 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7647 enum pipe pipe = crtc->pipe; 7648 enum transcoder transcoder = crtc_state->cpu_transcoder; 7649 7650 if (INTEL_GEN(dev_priv) >= 5) { 7651 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 7652 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 7653 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 7654 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 7655 /* 7656 * M2_N2 registers are set only if DRRS is supported 7657 * (to make sure the registers are not unnecessarily accessed). 7658 */ 7659 if (m2_n2 && crtc_state->has_drrs && 7660 transcoder_has_m2_n2(dev_priv, transcoder)) { 7661 I915_WRITE(PIPE_DATA_M2(transcoder), 7662 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 7663 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 7664 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 7665 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 7666 } 7667 } else { 7668 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7669 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 7670 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 7671 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 7672 } 7673 } 7674 7675 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) 7676 { 7677 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 7678 7679 if (m_n == M1_N1) { 7680 dp_m_n = &crtc_state->dp_m_n; 7681 dp_m2_n2 = &crtc_state->dp_m2_n2; 7682 } else if (m_n == M2_N2) { 7683 7684 /* 7685 * M2_N2 registers are not supported. Hence m2_n2 divider value 7686 * needs to be programmed into M1_N1. 
7687 */ 7688 dp_m_n = &crtc_state->dp_m2_n2; 7689 } else { 7690 DRM_ERROR("Unsupported divider value\n"); 7691 return; 7692 } 7693 7694 if (crtc_state->has_pch_encoder) 7695 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n); 7696 else 7697 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); 7698 } 7699 7700 static void vlv_compute_dpll(struct intel_crtc *crtc, 7701 struct intel_crtc_state *pipe_config) 7702 { 7703 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 7704 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7705 if (crtc->pipe != PIPE_A) 7706 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7707 7708 /* DPLL not used with DSI, but still need the rest set up */ 7709 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7710 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 7711 DPLL_EXT_BUFFER_ENABLE_VLV; 7712 7713 pipe_config->dpll_hw_state.dpll_md = 7714 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7715 } 7716 7717 static void chv_compute_dpll(struct intel_crtc *crtc, 7718 struct intel_crtc_state *pipe_config) 7719 { 7720 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 7721 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7722 if (crtc->pipe != PIPE_A) 7723 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7724 7725 /* DPLL not used with DSI, but still need the rest set up */ 7726 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7727 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 7728 7729 pipe_config->dpll_hw_state.dpll_md = 7730 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7731 } 7732 7733 static void vlv_prepare_pll(struct intel_crtc *crtc, 7734 const struct intel_crtc_state *pipe_config) 7735 { 7736 struct drm_device *dev = crtc->base.dev; 7737 struct drm_i915_private *dev_priv = to_i915(dev); 7738 enum pipe pipe = crtc->pipe; 7739 u32 mdiv; 7740 u32 bestn, bestm1, bestm2, bestp1, bestp2; 7741 u32 coreclk, reg_val; 7742 7743 /* Enable Refclk */ 7744 I915_WRITE(DPLL(pipe), 7745 pipe_config->dpll_hw_state.dpll & 7746 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 7747 7748 /* No need to actually set up the DPLL with DSI */ 7749 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7750 return; 7751 7752 vlv_dpio_get(dev_priv); 7753 7754 bestn = pipe_config->dpll.n; 7755 bestm1 = pipe_config->dpll.m1; 7756 bestm2 = pipe_config->dpll.m2; 7757 bestp1 = pipe_config->dpll.p1; 7758 bestp2 = pipe_config->dpll.p2; 7759 7760 /* See eDP HDMI DPIO driver vbios notes doc */ 7761 7762 /* PLL B needs special handling */ 7763 if (pipe == PIPE_B) 7764 vlv_pllb_recal_opamp(dev_priv, pipe); 7765 7766 /* Set up Tx target for periodic Rcomp update */ 7767 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 7768 7769 /* Disable target IRef on PLL */ 7770 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 7771 reg_val &= 0x00ffffff; 7772 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 7773 7774 /* Disable fast lock */ 7775 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 7776 7777 /* Set idtafcrecal before PLL is enabled */ 7778 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 7779 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 7780 mdiv |= ((bestn << DPIO_N_SHIFT)); 7781 mdiv |= (1 << DPIO_K_SHIFT); 7782 7783 /* 7784 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 7785 * but we don't support that). 7786 * Note: don't use the DAC post divider as it seems unstable. 
7787 */ 7788 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 7789 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7790 7791 mdiv |= DPIO_ENABLE_CALIBRATION; 7792 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7793 7794 /* Set HBR and RBR LPF coefficients */ 7795 if (pipe_config->port_clock == 162000 || 7796 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || 7797 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) 7798 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7799 0x009f0003); 7800 else 7801 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7802 0x00d0000f); 7803 7804 if (intel_crtc_has_dp_encoder(pipe_config)) { 7805 /* Use SSC source */ 7806 if (pipe == PIPE_A) 7807 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7808 0x0df40000); 7809 else 7810 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7811 0x0df70000); 7812 } else { /* HDMI or VGA */ 7813 /* Use bend source */ 7814 if (pipe == PIPE_A) 7815 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7816 0x0df70000); 7817 else 7818 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7819 0x0df40000); 7820 } 7821 7822 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 7823 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 7824 if (intel_crtc_has_dp_encoder(pipe_config)) 7825 coreclk |= 0x01000000; 7826 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 7827 7828 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 7829 7830 vlv_dpio_put(dev_priv); 7831 } 7832 7833 static void chv_prepare_pll(struct intel_crtc *crtc, 7834 const struct intel_crtc_state *pipe_config) 7835 { 7836 struct drm_device *dev = crtc->base.dev; 7837 struct drm_i915_private *dev_priv = to_i915(dev); 7838 enum pipe pipe = crtc->pipe; 7839 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7840 u32 loopfilter, tribuf_calcntr; 7841 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 7842 u32 dpio_val; 7843 int vco; 7844 7845 /* Enable Refclk and SSC */ 7846 I915_WRITE(DPLL(pipe), 7847 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 7848 7849 /* No need to actually set up the DPLL with DSI */ 7850 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7851 return; 7852 7853 bestn = pipe_config->dpll.n; 7854 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 7855 bestm1 = pipe_config->dpll.m1; 7856 bestm2 = pipe_config->dpll.m2 >> 22; 7857 bestp1 = pipe_config->dpll.p1; 7858 bestp2 = pipe_config->dpll.p2; 7859 vco = pipe_config->dpll.vco; 7860 dpio_val = 0; 7861 loopfilter = 0; 7862 7863 vlv_dpio_get(dev_priv); 7864 7865 /* p1 and p2 divider */ 7866 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 7867 5 << DPIO_CHV_S1_DIV_SHIFT | 7868 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 7869 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 7870 1 << DPIO_CHV_K_DIV_SHIFT); 7871 7872 /* Feedback post-divider - m2 */ 7873 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 7874 7875 /* Feedback refclk divider - n and m1 */ 7876 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 7877 DPIO_CHV_M1_DIV_BY_2 | 7878 1 << DPIO_CHV_N_DIV_SHIFT); 7879 7880 /* M2 fraction division */ 7881 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 7882 7883 /* M2 fraction division enable */ 7884 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 7885 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 7886 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 7887 if (bestm2_frac) 7888 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 7889 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 7890 7891 /* Program 
digital lock detect threshold */ 7892 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 7893 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 7894 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 7895 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 7896 if (!bestm2_frac) 7897 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 7898 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 7899 7900 /* Loop filter */ 7901 if (vco == 5400000) { 7902 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 7903 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 7904 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 7905 tribuf_calcntr = 0x9; 7906 } else if (vco <= 6200000) { 7907 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 7908 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 7909 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7910 tribuf_calcntr = 0x9; 7911 } else if (vco <= 6480000) { 7912 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7913 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7914 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7915 tribuf_calcntr = 0x8; 7916 } else { 7917 /* Not supported. Apply the same limits as in the max case */ 7918 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7919 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7920 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7921 tribuf_calcntr = 0; 7922 } 7923 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 7924 7925 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 7926 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 7927 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 7928 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 7929 7930 /* AFC Recal */ 7931 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 7932 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 7933 DPIO_AFC_RECAL); 7934 7935 vlv_dpio_put(dev_priv); 7936 } 7937 7938 /** 7939 * vlv_force_pll_on - forcibly enable just the PLL 7940 * @dev_priv: i915 private structure 7941 * @pipe: pipe PLL to enable 7942 * @dpll: PLL configuration 7943 * 7944 * Enable the PLL for @pipe using the supplied @dpll config. To be used 7945 * in cases where we need the PLL enabled even when @pipe is not going to 7946 * be enabled. 7947 */ 7948 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, 7949 const struct dpll *dpll) 7950 { 7951 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 7952 struct intel_crtc_state *pipe_config; 7953 7954 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); 7955 if (!pipe_config) 7956 return -ENOMEM; 7957 7958 pipe_config->base.crtc = &crtc->base; 7959 pipe_config->pixel_multiplier = 1; 7960 pipe_config->dpll = *dpll; 7961 7962 if (IS_CHERRYVIEW(dev_priv)) { 7963 chv_compute_dpll(crtc, pipe_config); 7964 chv_prepare_pll(crtc, pipe_config); 7965 chv_enable_pll(crtc, pipe_config); 7966 } else { 7967 vlv_compute_dpll(crtc, pipe_config); 7968 vlv_prepare_pll(crtc, pipe_config); 7969 vlv_enable_pll(crtc, pipe_config); 7970 } 7971 7972 kfree(pipe_config); 7973 7974 return 0; 7975 } 7976 7977 /** 7978 * vlv_force_pll_off - forcibly disable just the PLL 7979 * @dev_priv: i915 private structure 7980 * @pipe: pipe PLL to disable 7981 * 7982 * Disable the PLL for @pipe. To be used in cases where we need 7983 * the PLL enabled even when @pipe is not going to be enabled. 
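 * In practice this undoes an earlier vlv_force_pll_on() once the
 * forced PLL is no longer needed.
 *
 * A minimal usage sketch; the divider values here are made up for
 * illustration and are not taken from any real platform table:
 *
 *	struct dpll config = { .n = 1, .m1 = 2, .m2 = 100,
 *			       .p1 = 3, .p2 = 2 };
 *
 *	vlv_force_pll_on(dev_priv, PIPE_A, &config);
 *	...touch hardware that needs a running PLL on the idle pipe...
 *	vlv_force_pll_off(dev_priv, PIPE_A);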
7984 */ 7985 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) 7986 { 7987 if (IS_CHERRYVIEW(dev_priv)) 7988 chv_disable_pll(dev_priv, pipe); 7989 else 7990 vlv_disable_pll(dev_priv, pipe); 7991 } 7992 7993 static void i9xx_compute_dpll(struct intel_crtc *crtc, 7994 struct intel_crtc_state *crtc_state, 7995 struct dpll *reduced_clock) 7996 { 7997 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7998 u32 dpll; 7999 struct dpll *clock = &crtc_state->dpll; 8000 8001 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8002 8003 dpll = DPLL_VGA_MODE_DIS; 8004 8005 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 8006 dpll |= DPLLB_MODE_LVDS; 8007 else 8008 dpll |= DPLLB_MODE_DAC_SERIAL; 8009 8010 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 8011 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 8012 dpll |= (crtc_state->pixel_multiplier - 1) 8013 << SDVO_MULTIPLIER_SHIFT_HIRES; 8014 } 8015 8016 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 8017 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 8018 dpll |= DPLL_SDVO_HIGH_SPEED; 8019 8020 if (intel_crtc_has_dp_encoder(crtc_state)) 8021 dpll |= DPLL_SDVO_HIGH_SPEED; 8022 8023 /* compute bitmask from p1 value */ 8024 if (IS_PINEVIEW(dev_priv)) 8025 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 8026 else { 8027 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8028 if (IS_G4X(dev_priv) && reduced_clock) 8029 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8030 } 8031 switch (clock->p2) { 8032 case 5: 8033 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8034 break; 8035 case 7: 8036 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8037 break; 8038 case 10: 8039 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8040 break; 8041 case 14: 8042 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8043 break; 8044 } 8045 if (INTEL_GEN(dev_priv) >= 4) 8046 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 8047 8048 if (crtc_state->sdvo_tv_clock) 8049 dpll |= PLL_REF_INPUT_TVCLKINBC; 8050 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8051 intel_panel_use_ssc(dev_priv)) 8052 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8053 else 8054 dpll |= PLL_REF_INPUT_DREFCLK; 8055 8056 dpll |= DPLL_VCO_ENABLE; 8057 crtc_state->dpll_hw_state.dpll = dpll; 8058 8059 if (INTEL_GEN(dev_priv) >= 4) { 8060 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 8061 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8062 crtc_state->dpll_hw_state.dpll_md = dpll_md; 8063 } 8064 } 8065 8066 static void i8xx_compute_dpll(struct intel_crtc *crtc, 8067 struct intel_crtc_state *crtc_state, 8068 struct dpll *reduced_clock) 8069 { 8070 struct drm_device *dev = crtc->base.dev; 8071 struct drm_i915_private *dev_priv = to_i915(dev); 8072 u32 dpll; 8073 struct dpll *clock = &crtc_state->dpll; 8074 8075 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8076 8077 dpll = DPLL_VGA_MODE_DIS; 8078 8079 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8080 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8081 } else { 8082 if (clock->p1 == 2) 8083 dpll |= PLL_P1_DIVIDE_BY_TWO; 8084 else 8085 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8086 if (clock->p2 == 4) 8087 dpll |= PLL_P2_DIVIDE_BY_4; 8088 } 8089 8090 /* 8091 * Bspec: 8092 * "[Almador Errata}: For the correct operation of the muxed DVO pins 8093 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data, 8094 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock 8095 * Enable) must be set to “1” in both the DPLL A Control 
 * Register (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
 *
 * For simplicity we simply keep both bits always enabled in
 * both DPLLs. The spec says we should disable the DVO 2X clock
 * when not needed, but this seems to work fine in practice.
 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}

static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits.
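	 *
	 * Hence the extra write below: the value just programmed into
	 * VTOTAL(TRANSCODER_EDP) is mirrored into the VTOTAL register of
	 * the pipe actually feeding the eDP transcoder.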
*/ 8172 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 8173 (pipe == PIPE_B || pipe == PIPE_C)) 8174 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 8175 8176 } 8177 8178 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 8179 { 8180 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8181 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8182 enum pipe pipe = crtc->pipe; 8183 8184 /* pipesrc controls the size that is scaled from, which should 8185 * always be the user's requested size. 8186 */ 8187 I915_WRITE(PIPESRC(pipe), 8188 ((crtc_state->pipe_src_w - 1) << 16) | 8189 (crtc_state->pipe_src_h - 1)); 8190 } 8191 8192 static void intel_get_pipe_timings(struct intel_crtc *crtc, 8193 struct intel_crtc_state *pipe_config) 8194 { 8195 struct drm_device *dev = crtc->base.dev; 8196 struct drm_i915_private *dev_priv = to_i915(dev); 8197 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 8198 u32 tmp; 8199 8200 tmp = I915_READ(HTOTAL(cpu_transcoder)); 8201 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 8202 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 8203 8204 if (!transcoder_is_dsi(cpu_transcoder)) { 8205 tmp = I915_READ(HBLANK(cpu_transcoder)); 8206 pipe_config->base.adjusted_mode.crtc_hblank_start = 8207 (tmp & 0xffff) + 1; 8208 pipe_config->base.adjusted_mode.crtc_hblank_end = 8209 ((tmp >> 16) & 0xffff) + 1; 8210 } 8211 tmp = I915_READ(HSYNC(cpu_transcoder)); 8212 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 8213 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 8214 8215 tmp = I915_READ(VTOTAL(cpu_transcoder)); 8216 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 8217 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 8218 8219 if (!transcoder_is_dsi(cpu_transcoder)) { 8220 tmp = I915_READ(VBLANK(cpu_transcoder)); 8221 pipe_config->base.adjusted_mode.crtc_vblank_start = 8222 (tmp & 0xffff) + 1; 8223 pipe_config->base.adjusted_mode.crtc_vblank_end = 8224 ((tmp >> 16) & 0xffff) + 1; 8225 } 8226 tmp = I915_READ(VSYNC(cpu_transcoder)); 8227 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 8228 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 8229 8230 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { 8231 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 8232 pipe_config->base.adjusted_mode.crtc_vtotal += 1; 8233 pipe_config->base.adjusted_mode.crtc_vblank_end += 1; 8234 } 8235 } 8236 8237 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 8238 struct intel_crtc_state *pipe_config) 8239 { 8240 struct drm_device *dev = crtc->base.dev; 8241 struct drm_i915_private *dev_priv = to_i915(dev); 8242 u32 tmp; 8243 8244 tmp = I915_READ(PIPESRC(crtc->pipe)); 8245 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 8246 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 8247 8248 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; 8249 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; 8250 } 8251 8252 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 8253 struct intel_crtc_state *pipe_config) 8254 { 8255 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; 8256 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; 8257 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; 8258 mode->hsync_end = 
pipe_config->base.adjusted_mode.crtc_hsync_end; 8259 8260 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; 8261 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; 8262 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; 8263 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; 8264 8265 mode->flags = pipe_config->base.adjusted_mode.flags; 8266 mode->type = DRM_MODE_TYPE_DRIVER; 8267 8268 mode->clock = pipe_config->base.adjusted_mode.crtc_clock; 8269 8270 mode->hsync = drm_mode_hsync(mode); 8271 mode->vrefresh = drm_mode_vrefresh(mode); 8272 drm_mode_set_name(mode); 8273 } 8274 8275 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 8276 { 8277 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8278 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8279 u32 pipeconf; 8280 8281 pipeconf = 0; 8282 8283 /* we keep both pipes enabled on 830 */ 8284 if (IS_I830(dev_priv)) 8285 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; 8286 8287 if (crtc_state->double_wide) 8288 pipeconf |= PIPECONF_DOUBLE_WIDE; 8289 8290 /* only g4x and later have fancy bpc/dither controls */ 8291 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8292 IS_CHERRYVIEW(dev_priv)) { 8293 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 8294 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 8295 pipeconf |= PIPECONF_DITHER_EN | 8296 PIPECONF_DITHER_TYPE_SP; 8297 8298 switch (crtc_state->pipe_bpp) { 8299 case 18: 8300 pipeconf |= PIPECONF_6BPC; 8301 break; 8302 case 24: 8303 pipeconf |= PIPECONF_8BPC; 8304 break; 8305 case 30: 8306 pipeconf |= PIPECONF_10BPC; 8307 break; 8308 default: 8309 /* Case prevented by intel_choose_pipe_bpp_dither. */ 8310 BUG(); 8311 } 8312 } 8313 8314 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 8315 if (INTEL_GEN(dev_priv) < 4 || 8316 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8317 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 8318 else 8319 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 8320 } else { 8321 pipeconf |= PIPECONF_PROGRESSIVE; 8322 } 8323 8324 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 8325 crtc_state->limited_color_range) 8326 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 8327 8328 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 8329 8330 I915_WRITE(PIPECONF(crtc->pipe), pipeconf); 8331 POSTING_READ(PIPECONF(crtc->pipe)); 8332 } 8333 8334 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 8335 struct intel_crtc_state *crtc_state) 8336 { 8337 struct drm_device *dev = crtc->base.dev; 8338 struct drm_i915_private *dev_priv = to_i915(dev); 8339 const struct intel_limit *limit; 8340 int refclk = 48000; 8341 8342 memset(&crtc_state->dpll_hw_state, 0, 8343 sizeof(crtc_state->dpll_hw_state)); 8344 8345 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8346 if (intel_panel_use_ssc(dev_priv)) { 8347 refclk = dev_priv->vbt.lvds_ssc_freq; 8348 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8349 } 8350 8351 limit = &intel_limits_i8xx_lvds; 8352 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 8353 limit = &intel_limits_i8xx_dvo; 8354 } else { 8355 limit = &intel_limits_i8xx_dac; 8356 } 8357 8358 if (!crtc_state->clock_set && 8359 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8360 refclk, NULL, &crtc_state->dpll)) { 8361 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8362 return -EINVAL; 8363 } 8364 8365 i8xx_compute_dpll(crtc, crtc_state, 
NULL); 8366 8367 return 0; 8368 } 8369 8370 static int g4x_crtc_compute_clock(struct intel_crtc *crtc, 8371 struct intel_crtc_state *crtc_state) 8372 { 8373 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8374 const struct intel_limit *limit; 8375 int refclk = 96000; 8376 8377 memset(&crtc_state->dpll_hw_state, 0, 8378 sizeof(crtc_state->dpll_hw_state)); 8379 8380 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8381 if (intel_panel_use_ssc(dev_priv)) { 8382 refclk = dev_priv->vbt.lvds_ssc_freq; 8383 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8384 } 8385 8386 if (intel_is_dual_link_lvds(dev_priv)) 8387 limit = &intel_limits_g4x_dual_channel_lvds; 8388 else 8389 limit = &intel_limits_g4x_single_channel_lvds; 8390 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 8391 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 8392 limit = &intel_limits_g4x_hdmi; 8393 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 8394 limit = &intel_limits_g4x_sdvo; 8395 } else { 8396 /* The option is for other outputs */ 8397 limit = &intel_limits_i9xx_sdvo; 8398 } 8399 8400 if (!crtc_state->clock_set && 8401 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8402 refclk, NULL, &crtc_state->dpll)) { 8403 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8404 return -EINVAL; 8405 } 8406 8407 i9xx_compute_dpll(crtc, crtc_state, NULL); 8408 8409 return 0; 8410 } 8411 8412 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 8413 struct intel_crtc_state *crtc_state) 8414 { 8415 struct drm_device *dev = crtc->base.dev; 8416 struct drm_i915_private *dev_priv = to_i915(dev); 8417 const struct intel_limit *limit; 8418 int refclk = 96000; 8419 8420 memset(&crtc_state->dpll_hw_state, 0, 8421 sizeof(crtc_state->dpll_hw_state)); 8422 8423 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8424 if (intel_panel_use_ssc(dev_priv)) { 8425 refclk = dev_priv->vbt.lvds_ssc_freq; 8426 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8427 } 8428 8429 limit = &intel_limits_pineview_lvds; 8430 } else { 8431 limit = &intel_limits_pineview_sdvo; 8432 } 8433 8434 if (!crtc_state->clock_set && 8435 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8436 refclk, NULL, &crtc_state->dpll)) { 8437 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8438 return -EINVAL; 8439 } 8440 8441 i9xx_compute_dpll(crtc, crtc_state, NULL); 8442 8443 return 0; 8444 } 8445 8446 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 8447 struct intel_crtc_state *crtc_state) 8448 { 8449 struct drm_device *dev = crtc->base.dev; 8450 struct drm_i915_private *dev_priv = to_i915(dev); 8451 const struct intel_limit *limit; 8452 int refclk = 96000; 8453 8454 memset(&crtc_state->dpll_hw_state, 0, 8455 sizeof(crtc_state->dpll_hw_state)); 8456 8457 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8458 if (intel_panel_use_ssc(dev_priv)) { 8459 refclk = dev_priv->vbt.lvds_ssc_freq; 8460 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8461 } 8462 8463 limit = &intel_limits_i9xx_lvds; 8464 } else { 8465 limit = &intel_limits_i9xx_sdvo; 8466 } 8467 8468 if (!crtc_state->clock_set && 8469 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8470 refclk, NULL, &crtc_state->dpll)) { 8471 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8472 return -EINVAL; 8473 } 8474 8475 i9xx_compute_dpll(crtc, crtc_state, NULL); 8476 8477 return 0; 8478 } 8479 8480 static int chv_crtc_compute_clock(struct 
intel_crtc *crtc, 8481 struct intel_crtc_state *crtc_state) 8482 { 8483 int refclk = 100000; 8484 const struct intel_limit *limit = &intel_limits_chv; 8485 8486 memset(&crtc_state->dpll_hw_state, 0, 8487 sizeof(crtc_state->dpll_hw_state)); 8488 8489 if (!crtc_state->clock_set && 8490 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8491 refclk, NULL, &crtc_state->dpll)) { 8492 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8493 return -EINVAL; 8494 } 8495 8496 chv_compute_dpll(crtc, crtc_state); 8497 8498 return 0; 8499 } 8500 8501 static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 8502 struct intel_crtc_state *crtc_state) 8503 { 8504 int refclk = 100000; 8505 const struct intel_limit *limit = &intel_limits_vlv; 8506 8507 memset(&crtc_state->dpll_hw_state, 0, 8508 sizeof(crtc_state->dpll_hw_state)); 8509 8510 if (!crtc_state->clock_set && 8511 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8512 refclk, NULL, &crtc_state->dpll)) { 8513 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8514 return -EINVAL; 8515 } 8516 8517 vlv_compute_dpll(crtc, crtc_state); 8518 8519 return 0; 8520 } 8521 8522 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 8523 { 8524 if (IS_I830(dev_priv)) 8525 return false; 8526 8527 return INTEL_GEN(dev_priv) >= 4 || 8528 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 8529 } 8530 8531 static void i9xx_get_pfit_config(struct intel_crtc *crtc, 8532 struct intel_crtc_state *pipe_config) 8533 { 8534 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8535 u32 tmp; 8536 8537 if (!i9xx_has_pfit(dev_priv)) 8538 return; 8539 8540 tmp = I915_READ(PFIT_CONTROL); 8541 if (!(tmp & PFIT_ENABLE)) 8542 return; 8543 8544 /* Check whether the pfit is attached to our pipe. */ 8545 if (INTEL_GEN(dev_priv) < 4) { 8546 if (crtc->pipe != PIPE_B) 8547 return; 8548 } else { 8549 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 8550 return; 8551 } 8552 8553 pipe_config->gmch_pfit.control = tmp; 8554 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 8555 } 8556 8557 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 8558 struct intel_crtc_state *pipe_config) 8559 { 8560 struct drm_device *dev = crtc->base.dev; 8561 struct drm_i915_private *dev_priv = to_i915(dev); 8562 int pipe = pipe_config->cpu_transcoder; 8563 struct dpll clock; 8564 u32 mdiv; 8565 int refclk = 100000; 8566 8567 /* In case of DSI, DPLL will not be used */ 8568 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8569 return; 8570 8571 vlv_dpio_get(dev_priv); 8572 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 8573 vlv_dpio_put(dev_priv); 8574 8575 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 8576 clock.m2 = mdiv & DPIO_M2DIV_MASK; 8577 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 8578 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 8579 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 8580 8581 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 8582 } 8583 8584 static void 8585 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 8586 struct intel_initial_plane_config *plane_config) 8587 { 8588 struct drm_device *dev = crtc->base.dev; 8589 struct drm_i915_private *dev_priv = to_i915(dev); 8590 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 8591 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 8592 enum pipe pipe; 8593 u32 val, base, offset; 8594 int fourcc, pixel_format; 8595 unsigned int aligned_height; 8596 struct drm_framebuffer *fb; 8597 struct intel_framebuffer *intel_fb; 8598 8599 if 
(!plane->get_hw_state(plane, &pipe)) 8600 return; 8601 8602 WARN_ON(pipe != crtc->pipe); 8603 8604 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8605 if (!intel_fb) { 8606 DRM_DEBUG_KMS("failed to alloc fb\n"); 8607 return; 8608 } 8609 8610 fb = &intel_fb->base; 8611 8612 fb->dev = dev; 8613 8614 val = I915_READ(DSPCNTR(i9xx_plane)); 8615 8616 if (INTEL_GEN(dev_priv) >= 4) { 8617 if (val & DISPPLANE_TILED) { 8618 plane_config->tiling = I915_TILING_X; 8619 fb->modifier = I915_FORMAT_MOD_X_TILED; 8620 } 8621 8622 if (val & DISPPLANE_ROTATE_180) 8623 plane_config->rotation = DRM_MODE_ROTATE_180; 8624 } 8625 8626 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && 8627 val & DISPPLANE_MIRROR) 8628 plane_config->rotation |= DRM_MODE_REFLECT_X; 8629 8630 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 8631 fourcc = i9xx_format_to_fourcc(pixel_format); 8632 fb->format = drm_format_info(fourcc); 8633 8634 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 8635 offset = I915_READ(DSPOFFSET(i9xx_plane)); 8636 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 8637 } else if (INTEL_GEN(dev_priv) >= 4) { 8638 if (plane_config->tiling) 8639 offset = I915_READ(DSPTILEOFF(i9xx_plane)); 8640 else 8641 offset = I915_READ(DSPLINOFF(i9xx_plane)); 8642 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 8643 } else { 8644 base = I915_READ(DSPADDR(i9xx_plane)); 8645 } 8646 plane_config->base = base; 8647 8648 val = I915_READ(PIPESRC(pipe)); 8649 fb->width = ((val >> 16) & 0xfff) + 1; 8650 fb->height = ((val >> 0) & 0xfff) + 1; 8651 8652 val = I915_READ(DSPSTRIDE(i9xx_plane)); 8653 fb->pitches[0] = val & 0xffffffc0; 8654 8655 aligned_height = intel_fb_align_height(fb, 0, fb->height); 8656 8657 plane_config->size = fb->pitches[0] * aligned_height; 8658 8659 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 8660 crtc->base.name, plane->base.name, fb->width, fb->height, 8661 fb->format->cpp[0] * 8, base, fb->pitches[0], 8662 plane_config->size); 8663 8664 plane_config->fb = intel_fb; 8665 } 8666 8667 static void chv_crtc_clock_get(struct intel_crtc *crtc, 8668 struct intel_crtc_state *pipe_config) 8669 { 8670 struct drm_device *dev = crtc->base.dev; 8671 struct drm_i915_private *dev_priv = to_i915(dev); 8672 int pipe = pipe_config->cpu_transcoder; 8673 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8674 struct dpll clock; 8675 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 8676 int refclk = 100000; 8677 8678 /* In case of DSI, DPLL will not be used */ 8679 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8680 return; 8681 8682 vlv_dpio_get(dev_priv); 8683 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 8684 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 8685 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 8686 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 8687 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8688 vlv_dpio_put(dev_priv); 8689 8690 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 
2 : 0; 8691 clock.m2 = (pll_dw0 & 0xff) << 22; 8692 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 8693 clock.m2 |= pll_dw2 & 0x3fffff; 8694 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 8695 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 8696 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 8697 8698 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 8699 } 8700 8701 static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc, 8702 struct intel_crtc_state *pipe_config) 8703 { 8704 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8705 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB; 8706 8707 pipe_config->lspcon_downsampling = false; 8708 8709 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { 8710 u32 tmp = I915_READ(PIPEMISC(crtc->pipe)); 8711 8712 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { 8713 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE; 8714 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND; 8715 8716 if (ycbcr420_enabled) { 8717 /* We support 4:2:0 in full blend mode only */ 8718 if (!blend) 8719 output = INTEL_OUTPUT_FORMAT_INVALID; 8720 else if (!(IS_GEMINILAKE(dev_priv) || 8721 INTEL_GEN(dev_priv) >= 10)) 8722 output = INTEL_OUTPUT_FORMAT_INVALID; 8723 else 8724 output = INTEL_OUTPUT_FORMAT_YCBCR420; 8725 } else { 8726 /* 8727 * Currently there is no interface defined to 8728 * check user preference between RGB/YCBCR444 8729 * or YCBCR420. So the only possible case for 8730 * YCBCR444 usage is driving YCBCR420 output 8731 * with LSPCON, when pipe is configured for 8732 * YCBCR444 output and LSPCON takes care of 8733 * downsampling it. 8734 */ 8735 pipe_config->lspcon_downsampling = true; 8736 output = INTEL_OUTPUT_FORMAT_YCBCR444; 8737 } 8738 } 8739 } 8740 8741 pipe_config->output_format = output; 8742 } 8743 8744 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) 8745 { 8746 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8747 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 8748 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8749 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 8750 u32 tmp; 8751 8752 tmp = I915_READ(DSPCNTR(i9xx_plane)); 8753 8754 if (tmp & DISPPLANE_GAMMA_ENABLE) 8755 crtc_state->gamma_enable = true; 8756 8757 if (!HAS_GMCH(dev_priv) && 8758 tmp & DISPPLANE_PIPE_CSC_ENABLE) 8759 crtc_state->csc_enable = true; 8760 } 8761 8762 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 8763 struct intel_crtc_state *pipe_config) 8764 { 8765 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8766 enum intel_display_power_domain power_domain; 8767 intel_wakeref_t wakeref; 8768 u32 tmp; 8769 bool ret; 8770 8771 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 8772 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 8773 if (!wakeref) 8774 return false; 8775 8776 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 8777 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8778 pipe_config->shared_dpll = NULL; 8779 8780 ret = false; 8781 8782 tmp = I915_READ(PIPECONF(crtc->pipe)); 8783 if (!(tmp & PIPECONF_ENABLE)) 8784 goto out; 8785 8786 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8787 IS_CHERRYVIEW(dev_priv)) { 8788 switch (tmp & PIPECONF_BPC_MASK) { 8789 case PIPECONF_6BPC: 8790 pipe_config->pipe_bpp = 18; 8791 break; 8792 case PIPECONF_8BPC: 8793 pipe_config->pipe_bpp = 24; 8794 break; 8795 case PIPECONF_10BPC: 8796 pipe_config->pipe_bpp = 30; 8797 break; 8798 default: 8799 break; 8800 } 
8801 } 8802 8803 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 8804 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 8805 pipe_config->limited_color_range = true; 8806 8807 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> 8808 PIPECONF_GAMMA_MODE_SHIFT; 8809 8810 if (IS_CHERRYVIEW(dev_priv)) 8811 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe)); 8812 8813 i9xx_get_pipe_color_config(pipe_config); 8814 intel_color_get_config(pipe_config); 8815 8816 if (INTEL_GEN(dev_priv) < 4) 8817 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 8818 8819 intel_get_pipe_timings(crtc, pipe_config); 8820 intel_get_pipe_src_size(crtc, pipe_config); 8821 8822 i9xx_get_pfit_config(crtc, pipe_config); 8823 8824 if (INTEL_GEN(dev_priv) >= 4) { 8825 /* No way to read it out on pipes B and C */ 8826 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 8827 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 8828 else 8829 tmp = I915_READ(DPLL_MD(crtc->pipe)); 8830 pipe_config->pixel_multiplier = 8831 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 8832 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 8833 pipe_config->dpll_hw_state.dpll_md = tmp; 8834 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 8835 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 8836 tmp = I915_READ(DPLL(crtc->pipe)); 8837 pipe_config->pixel_multiplier = 8838 ((tmp & SDVO_MULTIPLIER_MASK) 8839 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 8840 } else { 8841 /* Note that on i915G/GM the pixel multiplier is in the sdvo 8842 * port and will be fixed up in the encoder->get_config 8843 * function. */ 8844 pipe_config->pixel_multiplier = 1; 8845 } 8846 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 8847 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 8848 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 8849 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 8850 } else { 8851 /* Mask out read-only status bits. */ 8852 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 8853 DPLL_PORTC_READY_MASK | 8854 DPLL_PORTB_READY_MASK); 8855 } 8856 8857 if (IS_CHERRYVIEW(dev_priv)) 8858 chv_crtc_clock_get(crtc, pipe_config); 8859 else if (IS_VALLEYVIEW(dev_priv)) 8860 vlv_crtc_clock_get(crtc, pipe_config); 8861 else 8862 i9xx_crtc_clock_get(crtc, pipe_config); 8863 8864 /* 8865 * Normally the dotclock is filled in by the encoder .get_config() 8866 * but in case the pipe is enabled w/o any ports we need a sane 8867 * default. 
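 *
 * The default below just inverts the port_clock = dotclock *
 * pixel_multiplier relationship: e.g. an SDVO setup with a 2x
 * pixel multiplier and a 200 MHz port clock yields a 100 MHz
 * dotclock.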
8868 */ 8869 pipe_config->base.adjusted_mode.crtc_clock = 8870 pipe_config->port_clock / pipe_config->pixel_multiplier; 8871 8872 ret = true; 8873 8874 out: 8875 intel_display_power_put(dev_priv, power_domain, wakeref); 8876 8877 return ret; 8878 } 8879 8880 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) 8881 { 8882 struct intel_encoder *encoder; 8883 int i; 8884 u32 val, final; 8885 bool has_lvds = false; 8886 bool has_cpu_edp = false; 8887 bool has_panel = false; 8888 bool has_ck505 = false; 8889 bool can_ssc = false; 8890 bool using_ssc_source = false; 8891 8892 /* We need to take the global config into account */ 8893 for_each_intel_encoder(&dev_priv->drm, encoder) { 8894 switch (encoder->type) { 8895 case INTEL_OUTPUT_LVDS: 8896 has_panel = true; 8897 has_lvds = true; 8898 break; 8899 case INTEL_OUTPUT_EDP: 8900 has_panel = true; 8901 if (encoder->port == PORT_A) 8902 has_cpu_edp = true; 8903 break; 8904 default: 8905 break; 8906 } 8907 } 8908 8909 if (HAS_PCH_IBX(dev_priv)) { 8910 has_ck505 = dev_priv->vbt.display_clock_mode; 8911 can_ssc = has_ck505; 8912 } else { 8913 has_ck505 = false; 8914 can_ssc = true; 8915 } 8916 8917 /* Check if any DPLLs are using the SSC source */ 8918 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 8919 u32 temp = I915_READ(PCH_DPLL(i)); 8920 8921 if (!(temp & DPLL_VCO_ENABLE)) 8922 continue; 8923 8924 if ((temp & PLL_REF_INPUT_MASK) == 8925 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 8926 using_ssc_source = true; 8927 break; 8928 } 8929 } 8930 8931 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 8932 has_panel, has_lvds, has_ck505, using_ssc_source); 8933 8934 /* Ironlake: try to setup display ref clock before DPLL 8935 * enabling. This is only under driver's control after 8936 * PCH B stepping, previous chipset stepping should be 8937 * ignoring this setting. 8938 */ 8939 val = I915_READ(PCH_DREF_CONTROL); 8940 8941 /* As we must carefully and slowly disable/enable each source in turn, 8942 * compute the final state we want first and check if we need to 8943 * make any changes at all. 
8944 */ 8945 final = val; 8946 final &= ~DREF_NONSPREAD_SOURCE_MASK; 8947 if (has_ck505) 8948 final |= DREF_NONSPREAD_CK505_ENABLE; 8949 else 8950 final |= DREF_NONSPREAD_SOURCE_ENABLE; 8951 8952 final &= ~DREF_SSC_SOURCE_MASK; 8953 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8954 final &= ~DREF_SSC1_ENABLE; 8955 8956 if (has_panel) { 8957 final |= DREF_SSC_SOURCE_ENABLE; 8958 8959 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8960 final |= DREF_SSC1_ENABLE; 8961 8962 if (has_cpu_edp) { 8963 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8964 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 8965 else 8966 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8967 } else 8968 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8969 } else if (using_ssc_source) { 8970 final |= DREF_SSC_SOURCE_ENABLE; 8971 final |= DREF_SSC1_ENABLE; 8972 } 8973 8974 if (final == val) 8975 return; 8976 8977 /* Always enable nonspread source */ 8978 val &= ~DREF_NONSPREAD_SOURCE_MASK; 8979 8980 if (has_ck505) 8981 val |= DREF_NONSPREAD_CK505_ENABLE; 8982 else 8983 val |= DREF_NONSPREAD_SOURCE_ENABLE; 8984 8985 if (has_panel) { 8986 val &= ~DREF_SSC_SOURCE_MASK; 8987 val |= DREF_SSC_SOURCE_ENABLE; 8988 8989 /* SSC must be turned on before enabling the CPU output */ 8990 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 8991 DRM_DEBUG_KMS("Using SSC on panel\n"); 8992 val |= DREF_SSC1_ENABLE; 8993 } else 8994 val &= ~DREF_SSC1_ENABLE; 8995 8996 /* Get SSC going before enabling the outputs */ 8997 I915_WRITE(PCH_DREF_CONTROL, val); 8998 POSTING_READ(PCH_DREF_CONTROL); 8999 udelay(200); 9000 9001 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9002 9003 /* Enable CPU source on CPU attached eDP */ 9004 if (has_cpu_edp) { 9005 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9006 DRM_DEBUG_KMS("Using SSC on eDP\n"); 9007 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9008 } else 9009 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9010 } else 9011 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9012 9013 I915_WRITE(PCH_DREF_CONTROL, val); 9014 POSTING_READ(PCH_DREF_CONTROL); 9015 udelay(200); 9016 } else { 9017 DRM_DEBUG_KMS("Disabling CPU source output\n"); 9018 9019 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9020 9021 /* Turn off CPU output */ 9022 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9023 9024 I915_WRITE(PCH_DREF_CONTROL, val); 9025 POSTING_READ(PCH_DREF_CONTROL); 9026 udelay(200); 9027 9028 if (!using_ssc_source) { 9029 DRM_DEBUG_KMS("Disabling SSC source\n"); 9030 9031 /* Turn off the SSC source */ 9032 val &= ~DREF_SSC_SOURCE_MASK; 9033 val |= DREF_SSC_SOURCE_DISABLE; 9034 9035 /* Turn off SSC1 */ 9036 val &= ~DREF_SSC1_ENABLE; 9037 9038 I915_WRITE(PCH_DREF_CONTROL, val); 9039 POSTING_READ(PCH_DREF_CONTROL); 9040 udelay(200); 9041 } 9042 } 9043 9044 BUG_ON(val != final); 9045 } 9046 9047 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 9048 { 9049 u32 tmp; 9050 9051 tmp = I915_READ(SOUTH_CHICKEN2); 9052 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 9053 I915_WRITE(SOUTH_CHICKEN2, tmp); 9054 9055 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & 9056 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 9057 DRM_ERROR("FDI mPHY reset assert timeout\n"); 9058 9059 tmp = I915_READ(SOUTH_CHICKEN2); 9060 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 9061 I915_WRITE(SOUTH_CHICKEN2, tmp); 9062 9063 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & 9064 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 9065 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 9066 } 9067 9068 /* WaMPhyProgramming:hsw */ 9069 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 9070 { 9071 u32 tmp; 9072 9073 tmp = intel_sbi_read(dev_priv, 
0x8008, SBI_MPHY); 9074 tmp &= ~(0xFF << 24); 9075 tmp |= (0x12 << 24); 9076 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 9077 9078 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 9079 tmp |= (1 << 11); 9080 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 9081 9082 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 9083 tmp |= (1 << 11); 9084 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 9085 9086 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 9087 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9088 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 9089 9090 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 9091 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9092 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 9093 9094 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 9095 tmp &= ~(7 << 13); 9096 tmp |= (5 << 13); 9097 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 9098 9099 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 9100 tmp &= ~(7 << 13); 9101 tmp |= (5 << 13); 9102 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 9103 9104 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 9105 tmp &= ~0xFF; 9106 tmp |= 0x1C; 9107 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 9108 9109 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 9110 tmp &= ~0xFF; 9111 tmp |= 0x1C; 9112 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 9113 9114 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 9115 tmp &= ~(0xFF << 16); 9116 tmp |= (0x1C << 16); 9117 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 9118 9119 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 9120 tmp &= ~(0xFF << 16); 9121 tmp |= (0x1C << 16); 9122 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 9123 9124 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 9125 tmp |= (1 << 27); 9126 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 9127 9128 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 9129 tmp |= (1 << 27); 9130 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 9131 9132 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 9133 tmp &= ~(0xF << 28); 9134 tmp |= (4 << 28); 9135 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 9136 9137 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 9138 tmp &= ~(0xF << 28); 9139 tmp |= (4 << 28); 9140 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 9141 } 9142 9143 /* Implements 3 different sequences from BSpec chapter "Display iCLK 9144 * Programming" based on the parameters passed: 9145 * - Sequence to enable CLKOUT_DP 9146 * - Sequence to enable CLKOUT_DP without spread 9147 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 9148 */ 9149 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 9150 bool with_spread, bool with_fdi) 9151 { 9152 u32 reg, tmp; 9153 9154 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 9155 with_spread = true; 9156 if (WARN(HAS_PCH_LPT_LP(dev_priv) && 9157 with_fdi, "LP PCH doesn't have FDI\n")) 9158 with_fdi = false; 9159 9160 mutex_lock(&dev_priv->sb_lock); 9161 9162 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9163 tmp &= ~SBI_SSCCTL_DISABLE; 9164 tmp |= SBI_SSCCTL_PATHALT; 9165 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9166 9167 udelay(24); 9168 9169 if (with_spread) { 9170 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9171 tmp &= ~SBI_SSCCTL_PATHALT; 9172 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9173 9174 if (with_fdi) { 9175 lpt_reset_fdi_mphy(dev_priv); 9176 lpt_program_fdi_mphy(dev_priv); 9177 } 9178 } 9179 9180 reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; 9181 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9182 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9183 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9184 9185 mutex_unlock(&dev_priv->sb_lock); 9186 } 9187 9188 /* Sequence to disable CLKOUT_DP */ 9189 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 9190 { 9191 u32 reg, tmp; 9192 9193 mutex_lock(&dev_priv->sb_lock); 9194 9195 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 9196 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9197 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9198 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9199 9200 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9201 if (!(tmp & SBI_SSCCTL_DISABLE)) { 9202 if (!(tmp & SBI_SSCCTL_PATHALT)) { 9203 tmp |= SBI_SSCCTL_PATHALT; 9204 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9205 udelay(32); 9206 } 9207 tmp |= SBI_SSCCTL_DISABLE; 9208 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9209 } 9210 9211 mutex_unlock(&dev_priv->sb_lock); 9212 } 9213 9214 #define BEND_IDX(steps) ((50 + (steps)) / 5) 9215 9216 static const u16 sscdivintphase[] = { 9217 [BEND_IDX( 50)] = 0x3B23, 9218 [BEND_IDX( 45)] = 0x3B23, 9219 [BEND_IDX( 40)] = 0x3C23, 9220 [BEND_IDX( 35)] = 0x3C23, 9221 [BEND_IDX( 30)] = 0x3D23, 9222 [BEND_IDX( 25)] = 0x3D23, 9223 [BEND_IDX( 20)] = 0x3E23, 9224 [BEND_IDX( 15)] = 0x3E23, 9225 [BEND_IDX( 10)] = 0x3F23, 9226 [BEND_IDX( 5)] = 0x3F23, 9227 [BEND_IDX( 0)] = 0x0025, 9228 [BEND_IDX( -5)] = 0x0025, 9229 [BEND_IDX(-10)] = 0x0125, 9230 [BEND_IDX(-15)] = 0x0125, 9231 [BEND_IDX(-20)] = 0x0225, 9232 [BEND_IDX(-25)] = 0x0225, 9233 [BEND_IDX(-30)] = 0x0325, 9234 [BEND_IDX(-35)] = 0x0325, 9235 [BEND_IDX(-40)] = 0x0425, 9236 [BEND_IDX(-45)] = 0x0425, 9237 [BEND_IDX(-50)] = 0x0525, 9238 }; 9239 9240 /* 9241 * Bend CLKOUT_DP 9242 * steps -50 to 50 inclusive, in steps of 5 9243 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 9244 * change in clock period = -(steps / 10) * 5.787 ps 9245 */ 9246 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 9247 { 9248 u32 tmp; 9249 int idx = BEND_IDX(steps); 9250 9251 if (WARN_ON(steps % 5 != 0)) 9252 return; 9253 9254 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) 9255 return; 9256 9257 mutex_lock(&dev_priv->sb_lock); 9258 9259 if (steps % 10 != 0) 9260 tmp = 0xAAAAAAAB; 9261 else 9262 tmp = 0x00000000; 9263 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 9264 9265 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 9266 tmp &= 0xffff0000; 9267 tmp |= sscdivintphase[idx]; 9268 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 9269 9270 mutex_unlock(&dev_priv->sb_lock); 9271 } 9272 9273 #undef BEND_IDX 9274 9275 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) 9276 { 9277 u32 fuse_strap = I915_READ(FUSE_STRAP); 9278 u32 ctl = I915_READ(SPLL_CTL); 9279 9280 if ((ctl & SPLL_PLL_ENABLE) == 0) 9281 return false; 9282 9283 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && 9284 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9285 return true; 9286 9287 if (IS_BROADWELL(dev_priv) && 9288 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) 9289 return true; 9290 9291 return false; 9292 } 9293 9294 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, 9295 enum intel_dpll_id id) 9296 { 9297 u32 fuse_strap = I915_READ(FUSE_STRAP); 9298 u32 ctl = I915_READ(WRPLL_CTL(id)); 9299 9300 if ((ctl & WRPLL_PLL_ENABLE) == 0) 9301 return false; 9302 9303 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) 9304 
return true; 9305 9306 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && 9307 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && 9308 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9309 return true; 9310 9311 return false; 9312 } 9313 9314 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 9315 { 9316 struct intel_encoder *encoder; 9317 bool pch_ssc_in_use = false; 9318 bool has_fdi = false; 9319 9320 for_each_intel_encoder(&dev_priv->drm, encoder) { 9321 switch (encoder->type) { 9322 case INTEL_OUTPUT_ANALOG: 9323 has_fdi = true; 9324 break; 9325 default: 9326 break; 9327 } 9328 } 9329 9330 /* 9331 * The BIOS may have decided to use the PCH SSC 9332 * reference so we must not disable it until the 9333 * relevant PLLs have stopped relying on it. We'll 9334 * just leave the PCH SSC reference enabled in case 9335 * any active PLL is using it. It will get disabled 9336 * after runtime suspend if we don't have FDI. 9337 * 9338 * TODO: Move the whole reference clock handling 9339 * to the modeset sequence proper so that we can 9340 * actually enable/disable/reconfigure these things 9341 * safely. To do that we need to introduce a real 9342 * clock hierarchy. That would also allow us to do 9343 * clock bending finally. 9344 */ 9345 if (spll_uses_pch_ssc(dev_priv)) { 9346 DRM_DEBUG_KMS("SPLL using PCH SSC\n"); 9347 pch_ssc_in_use = true; 9348 } 9349 9350 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { 9351 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n"); 9352 pch_ssc_in_use = true; 9353 } 9354 9355 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { 9356 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n"); 9357 pch_ssc_in_use = true; 9358 } 9359 9360 if (pch_ssc_in_use) 9361 return; 9362 9363 if (has_fdi) { 9364 lpt_bend_clkout_dp(dev_priv, 0); 9365 lpt_enable_clkout_dp(dev_priv, true, true); 9366 } else { 9367 lpt_disable_clkout_dp(dev_priv); 9368 } 9369 } 9370 9371 /* 9372 * Initialize reference clocks when the driver loads 9373 */ 9374 void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 9375 { 9376 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 9377 ironlake_init_pch_refclk(dev_priv); 9378 else if (HAS_PCH_LPT(dev_priv)) 9379 lpt_init_pch_refclk(dev_priv); 9380 } 9381 9382 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) 9383 { 9384 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9385 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9386 enum pipe pipe = crtc->pipe; 9387 u32 val; 9388 9389 val = 0; 9390 9391 switch (crtc_state->pipe_bpp) { 9392 case 18: 9393 val |= PIPECONF_6BPC; 9394 break; 9395 case 24: 9396 val |= PIPECONF_8BPC; 9397 break; 9398 case 30: 9399 val |= PIPECONF_10BPC; 9400 break; 9401 case 36: 9402 val |= PIPECONF_12BPC; 9403 break; 9404 default: 9405 /* Case prevented by intel_choose_pipe_bpp_dither. 
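		 * pipe_bpp here is always 3 * bpc, i.e. 18/24/30/36 for
		 * 6/8/10/12 bpc, so no other value can reach this switch.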
*/ 9406 BUG(); 9407 } 9408 9409 if (crtc_state->dither) 9410 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9411 9412 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9413 val |= PIPECONF_INTERLACED_ILK; 9414 else 9415 val |= PIPECONF_PROGRESSIVE; 9416 9417 if (crtc_state->limited_color_range) 9418 val |= PIPECONF_COLOR_RANGE_SELECT; 9419 9420 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 9421 9422 I915_WRITE(PIPECONF(pipe), val); 9423 POSTING_READ(PIPECONF(pipe)); 9424 } 9425 9426 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) 9427 { 9428 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9429 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9430 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 9431 u32 val = 0; 9432 9433 if (IS_HASWELL(dev_priv) && crtc_state->dither) 9434 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9435 9436 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9437 val |= PIPECONF_INTERLACED_ILK; 9438 else 9439 val |= PIPECONF_PROGRESSIVE; 9440 9441 I915_WRITE(PIPECONF(cpu_transcoder), val); 9442 POSTING_READ(PIPECONF(cpu_transcoder)); 9443 } 9444 9445 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) 9446 { 9447 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9448 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9449 u32 val = 0; 9450 9451 switch (crtc_state->pipe_bpp) { 9452 case 18: 9453 val |= PIPEMISC_DITHER_6_BPC; 9454 break; 9455 case 24: 9456 val |= PIPEMISC_DITHER_8_BPC; 9457 break; 9458 case 30: 9459 val |= PIPEMISC_DITHER_10_BPC; 9460 break; 9461 case 36: 9462 val |= PIPEMISC_DITHER_12_BPC; 9463 break; 9464 default: 9465 MISSING_CASE(crtc_state->pipe_bpp); 9466 break; 9467 } 9468 9469 if (crtc_state->dither) 9470 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 9471 9472 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 9473 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 9474 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; 9475 9476 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 9477 val |= PIPEMISC_YUV420_ENABLE | 9478 PIPEMISC_YUV420_MODE_FULL_BLEND; 9479 9480 if (INTEL_GEN(dev_priv) >= 11 && 9481 (crtc_state->active_planes & ~(icl_hdr_plane_mask() | 9482 BIT(PLANE_CURSOR))) == 0) 9483 val |= PIPEMISC_HDR_MODE_PRECISION; 9484 9485 I915_WRITE(PIPEMISC(crtc->pipe), val); 9486 } 9487 9488 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) 9489 { 9490 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9491 u32 tmp; 9492 9493 tmp = I915_READ(PIPEMISC(crtc->pipe)); 9494 9495 switch (tmp & PIPEMISC_DITHER_BPC_MASK) { 9496 case PIPEMISC_DITHER_6_BPC: 9497 return 18; 9498 case PIPEMISC_DITHER_8_BPC: 9499 return 24; 9500 case PIPEMISC_DITHER_10_BPC: 9501 return 30; 9502 case PIPEMISC_DITHER_12_BPC: 9503 return 36; 9504 default: 9505 MISSING_CASE(tmp); 9506 return 0; 9507 } 9508 } 9509 9510 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 9511 { 9512 /* 9513 * Account for spread spectrum to avoid 9514 * oversubscribing the link. Max center spread 9515 * is 2.5%; use 5% for safety's sake. 
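	 * For example (illustrative numbers only): a 148500 kHz 1080p
	 * mode at 24 bpp needs 148500 * 24 * 21 / 20 = 3742200 kbps,
	 * which on a 270000 kHz FDI link works out to
	 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.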
 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

static void ironlake_compute_dpll(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
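	 *
	 * (Concretely: with all three IVB pipes driving PCH ports, two
	 * of them, e.g. a CRT and an HDMI output running at the same
	 * dotclock, have to share one of the two PCH DPLLs, and sharing
	 * requires an identical DPLL configuration, including this bit.)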
9590 */ 9591 if (INTEL_INFO(dev_priv)->num_pipes == 3 && 9592 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 9593 dpll |= DPLL_SDVO_HIGH_SPEED; 9594 9595 /* compute bitmask from p1 value */ 9596 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 9597 /* also FPA1 */ 9598 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 9599 9600 switch (crtc_state->dpll.p2) { 9601 case 5: 9602 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 9603 break; 9604 case 7: 9605 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 9606 break; 9607 case 10: 9608 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 9609 break; 9610 case 14: 9611 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 9612 break; 9613 } 9614 9615 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 9616 intel_panel_use_ssc(dev_priv)) 9617 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 9618 else 9619 dpll |= PLL_REF_INPUT_DREFCLK; 9620 9621 dpll |= DPLL_VCO_ENABLE; 9622 9623 crtc_state->dpll_hw_state.dpll = dpll; 9624 crtc_state->dpll_hw_state.fp0 = fp; 9625 crtc_state->dpll_hw_state.fp1 = fp2; 9626 } 9627 9628 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, 9629 struct intel_crtc_state *crtc_state) 9630 { 9631 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9632 struct intel_atomic_state *state = 9633 to_intel_atomic_state(crtc_state->base.state); 9634 const struct intel_limit *limit; 9635 int refclk = 120000; 9636 9637 memset(&crtc_state->dpll_hw_state, 0, 9638 sizeof(crtc_state->dpll_hw_state)); 9639 9640 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 9641 if (!crtc_state->has_pch_encoder) 9642 return 0; 9643 9644 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9645 if (intel_panel_use_ssc(dev_priv)) { 9646 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 9647 dev_priv->vbt.lvds_ssc_freq); 9648 refclk = dev_priv->vbt.lvds_ssc_freq; 9649 } 9650 9651 if (intel_is_dual_link_lvds(dev_priv)) { 9652 if (refclk == 100000) 9653 limit = &intel_limits_ironlake_dual_lvds_100m; 9654 else 9655 limit = &intel_limits_ironlake_dual_lvds; 9656 } else { 9657 if (refclk == 100000) 9658 limit = &intel_limits_ironlake_single_lvds_100m; 9659 else 9660 limit = &intel_limits_ironlake_single_lvds; 9661 } 9662 } else { 9663 limit = &intel_limits_ironlake_dac; 9664 } 9665 9666 if (!crtc_state->clock_set && 9667 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9668 refclk, NULL, &crtc_state->dpll)) { 9669 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 9670 return -EINVAL; 9671 } 9672 9673 ironlake_compute_dpll(crtc, crtc_state, NULL); 9674 9675 if (!intel_reserve_shared_dplls(state, crtc, NULL)) { 9676 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", 9677 pipe_name(crtc->pipe)); 9678 return -EINVAL; 9679 } 9680 9681 return 0; 9682 } 9683 9684 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 9685 struct intel_link_m_n *m_n) 9686 { 9687 struct drm_device *dev = crtc->base.dev; 9688 struct drm_i915_private *dev_priv = to_i915(dev); 9689 enum pipe pipe = crtc->pipe; 9690 9691 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 9692 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 9693 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 9694 & ~TU_SIZE_MASK; 9695 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 9696 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 9697 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9698 } 9699 9700 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 9701 enum transcoder transcoder, 9702 struct intel_link_m_n *m_n, 9703 struct 
intel_link_m_n *m2_n2) 9704 { 9705 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9706 enum pipe pipe = crtc->pipe; 9707 9708 if (INTEL_GEN(dev_priv) >= 5) { 9709 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); 9710 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); 9711 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 9712 & ~TU_SIZE_MASK; 9713 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 9714 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 9715 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9716 9717 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { 9718 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 9719 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 9720 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 9721 & ~TU_SIZE_MASK; 9722 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 9723 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 9724 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9725 } 9726 } else { 9727 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 9728 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 9729 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 9730 & ~TU_SIZE_MASK; 9731 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 9732 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 9733 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9734 } 9735 } 9736 9737 void intel_dp_get_m_n(struct intel_crtc *crtc, 9738 struct intel_crtc_state *pipe_config) 9739 { 9740 if (pipe_config->has_pch_encoder) 9741 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 9742 else 9743 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9744 &pipe_config->dp_m_n, 9745 &pipe_config->dp_m2_n2); 9746 } 9747 9748 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 9749 struct intel_crtc_state *pipe_config) 9750 { 9751 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9752 &pipe_config->fdi_m_n, NULL); 9753 } 9754 9755 static void skylake_get_pfit_config(struct intel_crtc *crtc, 9756 struct intel_crtc_state *pipe_config) 9757 { 9758 struct drm_device *dev = crtc->base.dev; 9759 struct drm_i915_private *dev_priv = to_i915(dev); 9760 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 9761 u32 ps_ctrl = 0; 9762 int id = -1; 9763 int i; 9764 9765 /* find scaler attached to this pipe */ 9766 for (i = 0; i < crtc->num_scalers; i++) { 9767 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 9768 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 9769 id = i; 9770 pipe_config->pch_pfit.enabled = true; 9771 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); 9772 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); 9773 scaler_state->scalers[i].in_use = true; 9774 break; 9775 } 9776 } 9777 9778 scaler_state->scaler_id = id; 9779 if (id >= 0) { 9780 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 9781 } else { 9782 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 9783 } 9784 } 9785 9786 static void 9787 skylake_get_initial_plane_config(struct intel_crtc *crtc, 9788 struct intel_initial_plane_config *plane_config) 9789 { 9790 struct drm_device *dev = crtc->base.dev; 9791 struct drm_i915_private *dev_priv = to_i915(dev); 9792 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 9793 enum plane_id plane_id = plane->id; 9794 enum pipe pipe; 9795 u32 val, base, offset, stride_mult, tiling, alpha; 9796 int fourcc, pixel_format; 9797 unsigned int aligned_height; 9798 struct drm_framebuffer *fb; 9799 struct intel_framebuffer *intel_fb; 9800 9801 if 
(!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with
	 * Xrandr, while i915 HW rotation is clockwise; that's why this
	 * swapping is needed.
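	 * E.g. a plane the hardware reports as rotated 90 degrees
	 * clockwise is exposed to userspace as DRM_MODE_ROTATE_270 and
	 * vice versa, while 0 and 180 degrees map to themselves.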
 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}

static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/*
		 * We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes
		 * which differentiate them) so just WARN about this case
		 * for now.
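		 * (IVB/HSW fitters carry a pipe select field, checked via
		 * PF_PIPE_SEL_MASK_IVB below, so a fitter could in principle
		 * be attached to another pipe; on ILK/SNB each pipe simply
		 * has its own fitter.)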
*/ 9930 if (IS_GEN(dev_priv, 7)) { 9931 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 9932 PF_PIPE_SEL_IVB(crtc->pipe)); 9933 } 9934 } 9935 } 9936 9937 static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 9938 struct intel_crtc_state *pipe_config) 9939 { 9940 struct drm_device *dev = crtc->base.dev; 9941 struct drm_i915_private *dev_priv = to_i915(dev); 9942 enum intel_display_power_domain power_domain; 9943 intel_wakeref_t wakeref; 9944 u32 tmp; 9945 bool ret; 9946 9947 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9948 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 9949 if (!wakeref) 9950 return false; 9951 9952 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 9953 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9954 pipe_config->shared_dpll = NULL; 9955 9956 ret = false; 9957 tmp = I915_READ(PIPECONF(crtc->pipe)); 9958 if (!(tmp & PIPECONF_ENABLE)) 9959 goto out; 9960 9961 switch (tmp & PIPECONF_BPC_MASK) { 9962 case PIPECONF_6BPC: 9963 pipe_config->pipe_bpp = 18; 9964 break; 9965 case PIPECONF_8BPC: 9966 pipe_config->pipe_bpp = 24; 9967 break; 9968 case PIPECONF_10BPC: 9969 pipe_config->pipe_bpp = 30; 9970 break; 9971 case PIPECONF_12BPC: 9972 pipe_config->pipe_bpp = 36; 9973 break; 9974 default: 9975 break; 9976 } 9977 9978 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 9979 pipe_config->limited_color_range = true; 9980 9981 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> 9982 PIPECONF_GAMMA_MODE_SHIFT; 9983 9984 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); 9985 9986 i9xx_get_pipe_color_config(pipe_config); 9987 intel_color_get_config(pipe_config); 9988 9989 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 9990 struct intel_shared_dpll *pll; 9991 enum intel_dpll_id pll_id; 9992 9993 pipe_config->has_pch_encoder = true; 9994 9995 tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); 9996 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 9997 FDI_DP_PORT_WIDTH_SHIFT) + 1; 9998 9999 ironlake_get_fdi_m_n_config(crtc, pipe_config); 10000 10001 if (HAS_PCH_IBX(dev_priv)) { 10002 /* 10003 * The pipe->pch transcoder and pch transcoder->pll 10004 * mapping is fixed. 
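			 * (pipe A -> PCH transcoder A -> PCH DPLL A, and
			 * likewise for B; hence the direct cast of the pipe
			 * to a DPLL id below.)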
 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
	    INTEL_GEN(dev_priv) >= 11) {
		struct intel_encoder *encoder =
			intel_get_crtc_new_encoder(state, crtc_state);

		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
				      pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	return 0;
}

static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
				   enum port port,
				   struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		temp = I915_READ(ICL_DPCLKA_CFGCR0) &
		       ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		WARN(1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv, id);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}

static void
bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 10120 enum port port, 10121 struct intel_crtc_state *pipe_config) 10122 { 10123 enum intel_dpll_id id; 10124 10125 switch (port) { 10126 case PORT_A: 10127 id = DPLL_ID_SKL_DPLL0; 10128 break; 10129 case PORT_B: 10130 id = DPLL_ID_SKL_DPLL1; 10131 break; 10132 case PORT_C: 10133 id = DPLL_ID_SKL_DPLL2; 10134 break; 10135 default: 10136 DRM_ERROR("Incorrect port type\n"); 10137 return; 10138 } 10139 10140 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10141 } 10142 10143 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, 10144 enum port port, 10145 struct intel_crtc_state *pipe_config) 10146 { 10147 enum intel_dpll_id id; 10148 u32 temp; 10149 10150 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 10151 id = temp >> (port * 3 + 1); 10152 10153 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3)) 10154 return; 10155 10156 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10157 } 10158 10159 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 10160 enum port port, 10161 struct intel_crtc_state *pipe_config) 10162 { 10163 enum intel_dpll_id id; 10164 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 10165 10166 switch (ddi_pll_sel) { 10167 case PORT_CLK_SEL_WRPLL1: 10168 id = DPLL_ID_WRPLL1; 10169 break; 10170 case PORT_CLK_SEL_WRPLL2: 10171 id = DPLL_ID_WRPLL2; 10172 break; 10173 case PORT_CLK_SEL_SPLL: 10174 id = DPLL_ID_SPLL; 10175 break; 10176 case PORT_CLK_SEL_LCPLL_810: 10177 id = DPLL_ID_LCPLL_810; 10178 break; 10179 case PORT_CLK_SEL_LCPLL_1350: 10180 id = DPLL_ID_LCPLL_1350; 10181 break; 10182 case PORT_CLK_SEL_LCPLL_2700: 10183 id = DPLL_ID_LCPLL_2700; 10184 break; 10185 default: 10186 MISSING_CASE(ddi_pll_sel); 10187 /* fall through */ 10188 case PORT_CLK_SEL_NONE: 10189 return; 10190 } 10191 10192 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10193 } 10194 10195 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 10196 struct intel_crtc_state *pipe_config, 10197 u64 *power_domain_mask, 10198 intel_wakeref_t *wakerefs) 10199 { 10200 struct drm_device *dev = crtc->base.dev; 10201 struct drm_i915_private *dev_priv = to_i915(dev); 10202 enum intel_display_power_domain power_domain; 10203 unsigned long panel_transcoder_mask = 0; 10204 unsigned long enabled_panel_transcoders = 0; 10205 enum transcoder panel_transcoder; 10206 intel_wakeref_t wf; 10207 u32 tmp; 10208 10209 if (INTEL_GEN(dev_priv) >= 11) 10210 panel_transcoder_mask |= 10211 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 10212 10213 if (HAS_TRANSCODER_EDP(dev_priv)) 10214 panel_transcoder_mask |= BIT(TRANSCODER_EDP); 10215 10216 /* 10217 * The pipe->transcoder mapping is fixed with the exception of the eDP 10218 * and DSI transcoders handled below. 10219 */ 10220 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10221 10222 /* 10223 * XXX: Do intel_display_power_get_if_enabled before reading this (for 10224 * consistency and less surprising code; it's in always on power). 10225 */ 10226 for_each_set_bit(panel_transcoder, 10227 &panel_transcoder_mask, 10228 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) { 10229 bool force_thru = false; 10230 enum pipe trans_pipe; 10231 10232 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder)); 10233 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 10234 continue; 10235 10236 /* 10237 * Log all enabled ones, only use the first one. 10238 * 10239 * FIXME: This won't work for two separate DSI displays. 
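		 * (Once a second bit is set in enabled_panel_transcoders,
		 * the != BIT(panel_transcoder) test below fails for every
		 * subsequent transcoder, so only the first enabled one is
		 * actually used.)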
10240 */ 10241 enabled_panel_transcoders |= BIT(panel_transcoder); 10242 if (enabled_panel_transcoders != BIT(panel_transcoder)) 10243 continue; 10244 10245 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 10246 default: 10247 WARN(1, "unknown pipe linked to transcoder %s\n", 10248 transcoder_name(panel_transcoder)); 10249 /* fall through */ 10250 case TRANS_DDI_EDP_INPUT_A_ONOFF: 10251 force_thru = true; 10252 /* fall through */ 10253 case TRANS_DDI_EDP_INPUT_A_ON: 10254 trans_pipe = PIPE_A; 10255 break; 10256 case TRANS_DDI_EDP_INPUT_B_ONOFF: 10257 trans_pipe = PIPE_B; 10258 break; 10259 case TRANS_DDI_EDP_INPUT_C_ONOFF: 10260 trans_pipe = PIPE_C; 10261 break; 10262 } 10263 10264 if (trans_pipe == crtc->pipe) { 10265 pipe_config->cpu_transcoder = panel_transcoder; 10266 pipe_config->pch_pfit.force_thru = force_thru; 10267 } 10268 } 10269 10270 /* 10271 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 10272 */ 10273 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && 10274 enabled_panel_transcoders != BIT(TRANSCODER_EDP)); 10275 10276 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 10277 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10278 10279 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10280 if (!wf) 10281 return false; 10282 10283 wakerefs[power_domain] = wf; 10284 *power_domain_mask |= BIT_ULL(power_domain); 10285 10286 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10287 10288 return tmp & PIPECONF_ENABLE; 10289 } 10290 10291 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 10292 struct intel_crtc_state *pipe_config, 10293 u64 *power_domain_mask, 10294 intel_wakeref_t *wakerefs) 10295 { 10296 struct drm_device *dev = crtc->base.dev; 10297 struct drm_i915_private *dev_priv = to_i915(dev); 10298 enum intel_display_power_domain power_domain; 10299 enum transcoder cpu_transcoder; 10300 intel_wakeref_t wf; 10301 enum port port; 10302 u32 tmp; 10303 10304 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 10305 if (port == PORT_A) 10306 cpu_transcoder = TRANSCODER_DSI_A; 10307 else 10308 cpu_transcoder = TRANSCODER_DSI_C; 10309 10310 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10311 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10312 10313 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10314 if (!wf) 10315 continue; 10316 10317 wakerefs[power_domain] = wf; 10318 *power_domain_mask |= BIT_ULL(power_domain); 10319 10320 /* 10321 * The PLL needs to be enabled with a valid divider 10322 * configuration, otherwise accessing DSI registers will hang 10323 * the machine. See BSpec North Display Engine 10324 * registers/MIPI[BXT]. We can break out here early, since we 10325 * need the same DSI PLL to be enabled for both DSI ports. 
 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	if (INTEL_GEN(dev_priv) >= 12)
		port = (tmp & TGL_TRANS_DDI_PORT_MASK) >>
			TGL_TRANS_DDI_PORT_SHIFT;
	else
		port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (INTEL_GEN(dev_priv) >= 11)
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one PCH transcoder, A, and it is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
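	 * (On HSW/BDW the FDI link exists only to feed the PCH for the
	 * analog CRT output, which is why checking PORT_E suffices.)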
 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}

static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config,
					  &power_domain_mask, wakerefs);

	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
					 &power_domain_mask, wakerefs)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);
	intel_get_crtc_ycbcr_config(crtc, pipe_config);

	pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	WARN_ON(power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wf) {
		wakerefs[power_domain] = wf;
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot read out IPS state on Broadwell; set it
			 * to true so we can set it to a defined state on the
			 * first commit.
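			 * (BDW toggles IPS through the pcode mailbox rather
			 * than an MMIO register, so there is nothing the
			 * driver could read back here.)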
10479 */ 10480 pipe_config->ips_enabled = true; 10481 } 10482 } 10483 10484 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 10485 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 10486 pipe_config->pixel_multiplier = 10487 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 10488 } else { 10489 pipe_config->pixel_multiplier = 1; 10490 } 10491 10492 out: 10493 for_each_power_domain(power_domain, power_domain_mask) 10494 intel_display_power_put(dev_priv, 10495 power_domain, wakerefs[power_domain]); 10496 10497 return active; 10498 } 10499 10500 static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 10501 { 10502 struct drm_i915_private *dev_priv = 10503 to_i915(plane_state->base.plane->dev); 10504 const struct drm_framebuffer *fb = plane_state->base.fb; 10505 const struct drm_i915_gem_object *obj = intel_fb_obj(fb); 10506 u32 base; 10507 10508 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) 10509 base = obj->phys_handle->busaddr; 10510 else 10511 base = intel_plane_ggtt_offset(plane_state); 10512 10513 base += plane_state->color_plane[0].offset; 10514 10515 /* ILK+ do this automagically */ 10516 if (HAS_GMCH(dev_priv) && 10517 plane_state->base.rotation & DRM_MODE_ROTATE_180) 10518 base += (plane_state->base.crtc_h * 10519 plane_state->base.crtc_w - 1) * fb->format->cpp[0]; 10520 10521 return base; 10522 } 10523 10524 static u32 intel_cursor_position(const struct intel_plane_state *plane_state) 10525 { 10526 int x = plane_state->base.crtc_x; 10527 int y = plane_state->base.crtc_y; 10528 u32 pos = 0; 10529 10530 if (x < 0) { 10531 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 10532 x = -x; 10533 } 10534 pos |= x << CURSOR_X_SHIFT; 10535 10536 if (y < 0) { 10537 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 10538 y = -y; 10539 } 10540 pos |= y << CURSOR_Y_SHIFT; 10541 10542 return pos; 10543 } 10544 10545 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) 10546 { 10547 const struct drm_mode_config *config = 10548 &plane_state->base.plane->dev->mode_config; 10549 int width = plane_state->base.crtc_w; 10550 int height = plane_state->base.crtc_h; 10551 10552 return width > 0 && width <= config->cursor_width && 10553 height > 0 && height <= config->cursor_height; 10554 } 10555 10556 static int intel_cursor_check_surface(struct intel_plane_state *plane_state) 10557 { 10558 int src_x, src_y; 10559 u32 offset; 10560 int ret; 10561 10562 ret = intel_plane_compute_gtt(plane_state); 10563 if (ret) 10564 return ret; 10565 10566 if (!plane_state->base.visible) 10567 return 0; 10568 10569 src_x = plane_state->base.src_x >> 16; 10570 src_y = plane_state->base.src_y >> 16; 10571 10572 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 10573 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 10574 plane_state, 0); 10575 10576 if (src_x != 0 || src_y != 0) { 10577 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n"); 10578 return -EINVAL; 10579 } 10580 10581 plane_state->color_plane[0].offset = offset; 10582 10583 return 0; 10584 } 10585 10586 static int intel_check_cursor(struct intel_crtc_state *crtc_state, 10587 struct intel_plane_state *plane_state) 10588 { 10589 const struct drm_framebuffer *fb = plane_state->base.fb; 10590 int ret; 10591 10592 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { 10593 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 10594 return -EINVAL; 10595 } 10596 10597 ret = drm_atomic_helper_check_plane_state(&plane_state->base, 10598 &crtc_state->base, 10599 DRM_PLANE_HELPER_NO_SCALING, 10600 
DRM_PLANE_HELPER_NO_SCALING, 10601 true, true); 10602 if (ret) 10603 return ret; 10604 10605 ret = intel_cursor_check_surface(plane_state); 10606 if (ret) 10607 return ret; 10608 10609 if (!plane_state->base.visible) 10610 return 0; 10611 10612 ret = intel_plane_check_src_coordinates(plane_state); 10613 if (ret) 10614 return ret; 10615 10616 return 0; 10617 } 10618 10619 static unsigned int 10620 i845_cursor_max_stride(struct intel_plane *plane, 10621 u32 pixel_format, u64 modifier, 10622 unsigned int rotation) 10623 { 10624 return 2048; 10625 } 10626 10627 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 10628 { 10629 u32 cntl = 0; 10630 10631 if (crtc_state->gamma_enable) 10632 cntl |= CURSOR_GAMMA_ENABLE; 10633 10634 return cntl; 10635 } 10636 10637 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 10638 const struct intel_plane_state *plane_state) 10639 { 10640 return CURSOR_ENABLE | 10641 CURSOR_FORMAT_ARGB | 10642 CURSOR_STRIDE(plane_state->color_plane[0].stride); 10643 } 10644 10645 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 10646 { 10647 int width = plane_state->base.crtc_w; 10648 10649 /* 10650 * 845g/865g are only limited by the width of their cursors, 10651 * the height is arbitrary up to the precision of the register. 10652 */ 10653 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 10654 } 10655 10656 static int i845_check_cursor(struct intel_crtc_state *crtc_state, 10657 struct intel_plane_state *plane_state) 10658 { 10659 const struct drm_framebuffer *fb = plane_state->base.fb; 10660 int ret; 10661 10662 ret = intel_check_cursor(crtc_state, plane_state); 10663 if (ret) 10664 return ret; 10665 10666 /* if we want to turn off the cursor ignore width and height */ 10667 if (!fb) 10668 return 0; 10669 10670 /* Check for which cursor types we support */ 10671 if (!i845_cursor_size_ok(plane_state)) { 10672 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 10673 plane_state->base.crtc_w, 10674 plane_state->base.crtc_h); 10675 return -EINVAL; 10676 } 10677 10678 WARN_ON(plane_state->base.visible && 10679 plane_state->color_plane[0].stride != fb->pitches[0]); 10680 10681 switch (fb->pitches[0]) { 10682 case 256: 10683 case 512: 10684 case 1024: 10685 case 2048: 10686 break; 10687 default: 10688 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n", 10689 fb->pitches[0]); 10690 return -EINVAL; 10691 } 10692 10693 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 10694 10695 return 0; 10696 } 10697 10698 static void i845_update_cursor(struct intel_plane *plane, 10699 const struct intel_crtc_state *crtc_state, 10700 const struct intel_plane_state *plane_state) 10701 { 10702 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 10703 u32 cntl = 0, base = 0, pos = 0, size = 0; 10704 unsigned long irqflags; 10705 10706 if (plane_state && plane_state->base.visible) { 10707 unsigned int width = plane_state->base.crtc_w; 10708 unsigned int height = plane_state->base.crtc_h; 10709 10710 cntl = plane_state->ctl | 10711 i845_cursor_ctl_crtc(crtc_state); 10712 10713 size = (height << 12) | width; 10714 10715 base = intel_cursor_base(plane_state); 10716 pos = intel_cursor_position(plane_state); 10717 } 10718 10719 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 10720 10721 /* On these chipsets we can only modify the base/size/stride 10722 * whilst the cursor is disabled. 
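	 * (Hence the full disable/re-program cycle below whenever base,
	 * size or cntl changed; a plain CURPOS move is safe while the
	 * cursor stays enabled.)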
10723 */ 10724 if (plane->cursor.base != base || 10725 plane->cursor.size != size || 10726 plane->cursor.cntl != cntl) { 10727 I915_WRITE_FW(CURCNTR(PIPE_A), 0); 10728 I915_WRITE_FW(CURBASE(PIPE_A), base); 10729 I915_WRITE_FW(CURSIZE, size); 10730 I915_WRITE_FW(CURPOS(PIPE_A), pos); 10731 I915_WRITE_FW(CURCNTR(PIPE_A), cntl); 10732 10733 plane->cursor.base = base; 10734 plane->cursor.size = size; 10735 plane->cursor.cntl = cntl; 10736 } else { 10737 I915_WRITE_FW(CURPOS(PIPE_A), pos); 10738 } 10739 10740 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 10741 } 10742 10743 static void i845_disable_cursor(struct intel_plane *plane, 10744 const struct intel_crtc_state *crtc_state) 10745 { 10746 i845_update_cursor(plane, crtc_state, NULL); 10747 } 10748 10749 static bool i845_cursor_get_hw_state(struct intel_plane *plane, 10750 enum pipe *pipe) 10751 { 10752 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 10753 enum intel_display_power_domain power_domain; 10754 intel_wakeref_t wakeref; 10755 bool ret; 10756 10757 power_domain = POWER_DOMAIN_PIPE(PIPE_A); 10758 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 10759 if (!wakeref) 10760 return false; 10761 10762 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 10763 10764 *pipe = PIPE_A; 10765 10766 intel_display_power_put(dev_priv, power_domain, wakeref); 10767 10768 return ret; 10769 } 10770 10771 static unsigned int 10772 i9xx_cursor_max_stride(struct intel_plane *plane, 10773 u32 pixel_format, u64 modifier, 10774 unsigned int rotation) 10775 { 10776 return plane->base.dev->mode_config.cursor_width * 4; 10777 } 10778 10779 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 10780 { 10781 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 10782 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10783 u32 cntl = 0; 10784 10785 if (INTEL_GEN(dev_priv) >= 11) 10786 return cntl; 10787 10788 if (crtc_state->gamma_enable) 10789 cntl = MCURSOR_GAMMA_ENABLE; 10790 10791 if (crtc_state->csc_enable) 10792 cntl |= MCURSOR_PIPE_CSC_ENABLE; 10793 10794 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 10795 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 10796 10797 return cntl; 10798 } 10799 10800 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 10801 const struct intel_plane_state *plane_state) 10802 { 10803 struct drm_i915_private *dev_priv = 10804 to_i915(plane_state->base.plane->dev); 10805 u32 cntl = 0; 10806 10807 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 10808 cntl |= MCURSOR_TRICKLE_FEED_DISABLE; 10809 10810 switch (plane_state->base.crtc_w) { 10811 case 64: 10812 cntl |= MCURSOR_MODE_64_ARGB_AX; 10813 break; 10814 case 128: 10815 cntl |= MCURSOR_MODE_128_ARGB_AX; 10816 break; 10817 case 256: 10818 cntl |= MCURSOR_MODE_256_ARGB_AX; 10819 break; 10820 default: 10821 MISSING_CASE(plane_state->base.crtc_w); 10822 return 0; 10823 } 10824 10825 if (plane_state->base.rotation & DRM_MODE_ROTATE_180) 10826 cntl |= MCURSOR_ROTATE_180; 10827 10828 return cntl; 10829 } 10830 10831 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) 10832 { 10833 struct drm_i915_private *dev_priv = 10834 to_i915(plane_state->base.plane->dev); 10835 int width = plane_state->base.crtc_w; 10836 int height = plane_state->base.crtc_h; 10837 10838 if (!intel_cursor_size_ok(plane_state)) 10839 return false; 10840 10841 /* Cursor width is limited to a few power-of-two sizes */ 10842 switch (width) { 10843 case 256: 10844 case 128: 10845 case 64: 
		break;
	default:
		return false;
	}

	/*
	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
	 * height from 8 lines up to the cursor width, when the
	 * cursor is not rotated. Everything else requires square
	 * cursors.
	 */
	if (HAS_CUR_FBC(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_0) {
		if (height < 8 || height > width)
			return false;
	} else {
		if (height != width)
			return false;
	}

	return true;
}

static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * a dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}

static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor registers will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}

static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-3 which don't have any
	 * display power wells.
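	 * For those older platforms the pipe the cursor is currently
	 * attached to is instead recovered from the MCURSOR_PIPE_SELECT
	 * field read back below.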
11008 */ 11009 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 11010 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11011 if (!wakeref) 11012 return false; 11013 11014 val = I915_READ(CURCNTR(plane->pipe)); 11015 11016 ret = val & MCURSOR_MODE; 11017 11018 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 11019 *pipe = plane->pipe; 11020 else 11021 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> 11022 MCURSOR_PIPE_SELECT_SHIFT; 11023 11024 intel_display_power_put(dev_priv, power_domain, wakeref); 11025 11026 return ret; 11027 } 11028 11029 /* VESA 640x480x72Hz mode to set on the pipe */ 11030 static const struct drm_display_mode load_detect_mode = { 11031 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 11032 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 11033 }; 11034 11035 struct drm_framebuffer * 11036 intel_framebuffer_create(struct drm_i915_gem_object *obj, 11037 struct drm_mode_fb_cmd2 *mode_cmd) 11038 { 11039 struct intel_framebuffer *intel_fb; 11040 int ret; 11041 11042 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 11043 if (!intel_fb) 11044 return ERR_PTR(-ENOMEM); 11045 11046 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); 11047 if (ret) 11048 goto err; 11049 11050 return &intel_fb->base; 11051 11052 err: 11053 kfree(intel_fb); 11054 return ERR_PTR(ret); 11055 } 11056 11057 static int intel_modeset_disable_planes(struct drm_atomic_state *state, 11058 struct drm_crtc *crtc) 11059 { 11060 struct drm_plane *plane; 11061 struct drm_plane_state *plane_state; 11062 int ret, i; 11063 11064 ret = drm_atomic_add_affected_planes(state, crtc); 11065 if (ret) 11066 return ret; 11067 11068 for_each_new_plane_in_state(state, plane, plane_state, i) { 11069 if (plane_state->crtc != crtc) 11070 continue; 11071 11072 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 11073 if (ret) 11074 return ret; 11075 11076 drm_atomic_set_fb_for_plane(plane_state, NULL); 11077 } 11078 11079 return 0; 11080 } 11081 11082 int intel_get_load_detect_pipe(struct drm_connector *connector, 11083 const struct drm_display_mode *mode, 11084 struct intel_load_detect_pipe *old, 11085 struct drm_modeset_acquire_ctx *ctx) 11086 { 11087 struct intel_crtc *intel_crtc; 11088 struct intel_encoder *intel_encoder = 11089 intel_attached_encoder(connector); 11090 struct drm_crtc *possible_crtc; 11091 struct drm_encoder *encoder = &intel_encoder->base; 11092 struct drm_crtc *crtc = NULL; 11093 struct drm_device *dev = encoder->dev; 11094 struct drm_i915_private *dev_priv = to_i915(dev); 11095 struct drm_mode_config *config = &dev->mode_config; 11096 struct drm_atomic_state *state = NULL, *restore_state = NULL; 11097 struct drm_connector_state *connector_state; 11098 struct intel_crtc_state *crtc_state; 11099 int ret, i = -1; 11100 11101 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11102 connector->base.id, connector->name, 11103 encoder->base.id, encoder->name); 11104 11105 old->restore_state = NULL; 11106 11107 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 11108 11109 /* 11110 * Algorithm gets a little messy: 11111 * 11112 * - if the connector already has an assigned crtc, use it (but make 11113 * sure it's on first) 11114 * 11115 * - try to find the first unused crtc that can drive this connector, 11116 * and use that if we find one 11117 */ 11118 11119 /* See if we already have a CRTC for this connector */ 11120 if (connector->state->crtc) { 11121 crtc = connector->state->crtc; 11122 11123 ret = drm_modeset_lock(&crtc->mutex, 
ctx); 11124 if (ret) 11125 goto fail; 11126 11127 /* Make sure the crtc and connector are running */ 11128 goto found; 11129 } 11130 11131 /* Find an unused one (if possible) */ 11132 for_each_crtc(dev, possible_crtc) { 11133 i++; 11134 if (!(encoder->possible_crtcs & (1 << i))) 11135 continue; 11136 11137 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 11138 if (ret) 11139 goto fail; 11140 11141 if (possible_crtc->state->enable) { 11142 drm_modeset_unlock(&possible_crtc->mutex); 11143 continue; 11144 } 11145 11146 crtc = possible_crtc; 11147 break; 11148 } 11149 11150 /* 11151 * If we didn't find an unused CRTC, don't use any. 11152 */ 11153 if (!crtc) { 11154 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 11155 ret = -ENODEV; 11156 goto fail; 11157 } 11158 11159 found: 11160 intel_crtc = to_intel_crtc(crtc); 11161 11162 state = drm_atomic_state_alloc(dev); 11163 restore_state = drm_atomic_state_alloc(dev); 11164 if (!state || !restore_state) { 11165 ret = -ENOMEM; 11166 goto fail; 11167 } 11168 11169 state->acquire_ctx = ctx; 11170 restore_state->acquire_ctx = ctx; 11171 11172 connector_state = drm_atomic_get_connector_state(state, connector); 11173 if (IS_ERR(connector_state)) { 11174 ret = PTR_ERR(connector_state); 11175 goto fail; 11176 } 11177 11178 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 11179 if (ret) 11180 goto fail; 11181 11182 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 11183 if (IS_ERR(crtc_state)) { 11184 ret = PTR_ERR(crtc_state); 11185 goto fail; 11186 } 11187 11188 crtc_state->base.active = crtc_state->base.enable = true; 11189 11190 if (!mode) 11191 mode = &load_detect_mode; 11192 11193 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); 11194 if (ret) 11195 goto fail; 11196 11197 ret = intel_modeset_disable_planes(state, crtc); 11198 if (ret) 11199 goto fail; 11200 11201 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 11202 if (!ret) 11203 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 11204 if (!ret) 11205 ret = drm_atomic_add_affected_planes(restore_state, crtc); 11206 if (ret) { 11207 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); 11208 goto fail; 11209 } 11210 11211 ret = drm_atomic_commit(state); 11212 if (ret) { 11213 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 11214 goto fail; 11215 } 11216 11217 old->restore_state = restore_state; 11218 drm_atomic_state_put(state); 11219 11220 /* let the connector get through one full cycle before testing */ 11221 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 11222 return true; 11223 11224 fail: 11225 if (state) { 11226 drm_atomic_state_put(state); 11227 state = NULL; 11228 } 11229 if (restore_state) { 11230 drm_atomic_state_put(restore_state); 11231 restore_state = NULL; 11232 } 11233 11234 if (ret == -EDEADLK) 11235 return ret; 11236 11237 return false; 11238 } 11239 11240 void intel_release_load_detect_pipe(struct drm_connector *connector, 11241 struct intel_load_detect_pipe *old, 11242 struct drm_modeset_acquire_ctx *ctx) 11243 { 11244 struct intel_encoder *intel_encoder = 11245 intel_attached_encoder(connector); 11246 struct drm_encoder *encoder = &intel_encoder->base; 11247 struct drm_atomic_state *state = old->restore_state; 11248 int ret; 11249 11250 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11251 connector->base.id, connector->name, 11252 encoder->base.id, encoder->name); 11253 11254 if (!state) 11255 return; 11256 11257 ret = 
drm_atomic_helper_commit_duplicated_state(state, ctx); 11258 if (ret) 11259 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret); 11260 drm_atomic_state_put(state); 11261 } 11262 11263 static int i9xx_pll_refclk(struct drm_device *dev, 11264 const struct intel_crtc_state *pipe_config) 11265 { 11266 struct drm_i915_private *dev_priv = to_i915(dev); 11267 u32 dpll = pipe_config->dpll_hw_state.dpll; 11268 11269 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 11270 return dev_priv->vbt.lvds_ssc_freq; 11271 else if (HAS_PCH_SPLIT(dev_priv)) 11272 return 120000; 11273 else if (!IS_GEN(dev_priv, 2)) 11274 return 96000; 11275 else 11276 return 48000; 11277 } 11278 11279 /* Returns the clock of the currently programmed mode of the given pipe. */ 11280 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 11281 struct intel_crtc_state *pipe_config) 11282 { 11283 struct drm_device *dev = crtc->base.dev; 11284 struct drm_i915_private *dev_priv = to_i915(dev); 11285 int pipe = pipe_config->cpu_transcoder; 11286 u32 dpll = pipe_config->dpll_hw_state.dpll; 11287 u32 fp; 11288 struct dpll clock; 11289 int port_clock; 11290 int refclk = i9xx_pll_refclk(dev, pipe_config); 11291 11292 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 11293 fp = pipe_config->dpll_hw_state.fp0; 11294 else 11295 fp = pipe_config->dpll_hw_state.fp1; 11296 11297 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 11298 if (IS_PINEVIEW(dev_priv)) { 11299 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 11300 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 11301 } else { 11302 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 11303 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 11304 } 11305 11306 if (!IS_GEN(dev_priv, 2)) { 11307 if (IS_PINEVIEW(dev_priv)) 11308 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 11309 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 11310 else 11311 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 11312 DPLL_FPA01_P1_POST_DIV_SHIFT); 11313 11314 switch (dpll & DPLL_MODE_MASK) { 11315 case DPLLB_MODE_DAC_SERIAL: 11316 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 11317 5 : 10; 11318 break; 11319 case DPLLB_MODE_LVDS: 11320 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 11321 7 : 14; 11322 break; 11323 default: 11324 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 11325 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 11326 return; 11327 } 11328 11329 if (IS_PINEVIEW(dev_priv)) 11330 port_clock = pnv_calc_dpll_params(refclk, &clock); 11331 else 11332 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11333 } else { 11334 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS); 11335 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 11336 11337 if (is_lvds) { 11338 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 11339 DPLL_FPA01_P1_POST_DIV_SHIFT); 11340 11341 if (lvds & LVDS_CLKB_POWER_UP) 11342 clock.p2 = 7; 11343 else 11344 clock.p2 = 14; 11345 } else { 11346 if (dpll & PLL_P1_DIVIDE_BY_TWO) 11347 clock.p1 = 2; 11348 else { 11349 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 11350 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 11351 } 11352 if (dpll & PLL_P2_DIVIDE_BY_4) 11353 clock.p2 = 4; 11354 else 11355 clock.p2 = 2; 11356 } 11357 11358 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11359 } 11360 11361 /* 11362 * This value includes pixel_multiplier. We will use 11363 * port_clock to compute adjusted_mode.crtc_clock in the 11364 * encoder's get_config() function. 
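	 * (Illustrative example, not taken from hw docs: with
	 * pixel_multiplier == 2 the encoder's get_config() is expected
	 * to report adjusted_mode.crtc_clock = port_clock / 2.)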
	 */
	pipe_config->port_clock = port_clock;
}

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and for the link clock it is simpler:
	 * dot_clock = (link_m * link_freq) / link_n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}

static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we still need some notion of the dotclock.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}

/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	enum pipe pipe;

	if (!encoder->get_hw_state(encoder, &pipe))
		return NULL;

	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		kfree(mode);
		return NULL;
	}

	crtc_state->base.crtc = &crtc->base;

	if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
		kfree(crtc_state);
		kfree(mode);
		return NULL;
	}

	encoder->get_config(encoder, crtc_state);

	intel_mode_from_pipe_config(mode, crtc_state);

	kfree(crtc_state);

	return mode;
}

static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}

/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @cur: current plane state
 * @new: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true or false.
 */
static bool intel_wm_need_update(const struct intel_plane_state *cur,
				 struct intel_plane_state *new)
{
	/* Update watermarks on tiling or size changes.
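	 * Specifically (see the checks below): visibility changes, fb
	 * modifier (tiling) changes, rotation changes, and changes in
	 * the source or destination rectangle dimensions.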
*/ 11470 if (new->base.visible != cur->base.visible) 11471 return true; 11472 11473 if (!cur->base.fb || !new->base.fb) 11474 return false; 11475 11476 if (cur->base.fb->modifier != new->base.fb->modifier || 11477 cur->base.rotation != new->base.rotation || 11478 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) || 11479 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) || 11480 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) || 11481 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst)) 11482 return true; 11483 11484 return false; 11485 } 11486 11487 static bool needs_scaling(const struct intel_plane_state *state) 11488 { 11489 int src_w = drm_rect_width(&state->base.src) >> 16; 11490 int src_h = drm_rect_height(&state->base.src) >> 16; 11491 int dst_w = drm_rect_width(&state->base.dst); 11492 int dst_h = drm_rect_height(&state->base.dst); 11493 11494 return (src_w != dst_w || src_h != dst_h); 11495 } 11496 11497 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, 11498 struct intel_crtc_state *crtc_state, 11499 const struct intel_plane_state *old_plane_state, 11500 struct intel_plane_state *plane_state) 11501 { 11502 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 11503 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 11504 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11505 bool mode_changed = needs_modeset(crtc_state); 11506 bool was_crtc_enabled = old_crtc_state->base.active; 11507 bool is_crtc_enabled = crtc_state->base.active; 11508 bool turn_off, turn_on, visible, was_visible; 11509 struct drm_framebuffer *fb = plane_state->base.fb; 11510 int ret; 11511 11512 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { 11513 ret = skl_update_scaler_plane(crtc_state, plane_state); 11514 if (ret) 11515 return ret; 11516 } 11517 11518 was_visible = old_plane_state->base.visible; 11519 visible = plane_state->base.visible; 11520 11521 if (!was_crtc_enabled && WARN_ON(was_visible)) 11522 was_visible = false; 11523 11524 /* 11525 * Visibility is calculated as if the crtc was on, but 11526 * after scaler setup everything depends on it being off 11527 * when the crtc isn't active. 11528 * 11529 * FIXME this is wrong for watermarks. Watermarks should also 11530 * be computed as if the pipe would be active. Perhaps move 11531 * per-plane wm computation to the .check_plane() hook, and 11532 * only combine the results from all planes in the current place? 11533 */ 11534 if (!is_crtc_enabled) { 11535 plane_state->base.visible = visible = false; 11536 crtc_state->active_planes &= ~BIT(plane->id); 11537 crtc_state->data_rate[plane->id] = 0; 11538 } 11539 11540 if (!was_visible && !visible) 11541 return 0; 11542 11543 if (fb != old_plane_state->base.fb) 11544 crtc_state->fb_changed = true; 11545 11546 turn_off = was_visible && (!visible || mode_changed); 11547 turn_on = visible && (!was_visible || mode_changed); 11548 11549 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", 11550 crtc->base.base.id, crtc->base.name, 11551 plane->base.base.id, plane->base.name, 11552 fb ? 
			 fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 * plane will be internally buffered and delayed while Big FIFO
	 * mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 * frame before enabling sprite scaling, and kept disabled
	 * until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 * masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * Experimental results suggest this is needed also for the
	 * primary plane, not only the sprite plane.
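	 * (Hence the plane->id != PLANE_CURSOR test below instead of a
	 * check restricted to sprite planes.)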
11616 */ 11617 if (plane->id != PLANE_CURSOR && 11618 (IS_GEN_RANGE(dev_priv, 5, 6) || 11619 IS_IVYBRIDGE(dev_priv)) && 11620 (turn_on || (!needs_scaling(old_plane_state) && 11621 needs_scaling(plane_state)))) 11622 crtc_state->disable_lp_wm = true; 11623 11624 return 0; 11625 } 11626 11627 static bool encoders_cloneable(const struct intel_encoder *a, 11628 const struct intel_encoder *b) 11629 { 11630 /* masks could be asymmetric, so check both ways */ 11631 return a == b || (a->cloneable & (1 << b->type) && 11632 b->cloneable & (1 << a->type)); 11633 } 11634 11635 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 11636 struct intel_crtc *crtc, 11637 struct intel_encoder *encoder) 11638 { 11639 struct intel_encoder *source_encoder; 11640 struct drm_connector *connector; 11641 struct drm_connector_state *connector_state; 11642 int i; 11643 11644 for_each_new_connector_in_state(state, connector, connector_state, i) { 11645 if (connector_state->crtc != &crtc->base) 11646 continue; 11647 11648 source_encoder = 11649 to_intel_encoder(connector_state->best_encoder); 11650 if (!encoders_cloneable(encoder, source_encoder)) 11651 return false; 11652 } 11653 11654 return true; 11655 } 11656 11657 static int icl_add_linked_planes(struct intel_atomic_state *state) 11658 { 11659 struct intel_plane *plane, *linked; 11660 struct intel_plane_state *plane_state, *linked_plane_state; 11661 int i; 11662 11663 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11664 linked = plane_state->linked_plane; 11665 11666 if (!linked) 11667 continue; 11668 11669 linked_plane_state = intel_atomic_get_plane_state(state, linked); 11670 if (IS_ERR(linked_plane_state)) 11671 return PTR_ERR(linked_plane_state); 11672 11673 WARN_ON(linked_plane_state->linked_plane != plane); 11674 WARN_ON(linked_plane_state->slave == plane_state->slave); 11675 } 11676 11677 return 0; 11678 } 11679 11680 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) 11681 { 11682 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 11683 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11684 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); 11685 struct intel_plane *plane, *linked; 11686 struct intel_plane_state *plane_state; 11687 int i; 11688 11689 if (INTEL_GEN(dev_priv) < 11) 11690 return 0; 11691 11692 /* 11693 * Destroy all old plane links and make the slave plane invisible 11694 * in the crtc_state->active_planes mask. 
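	 * On gen11 a planar (NV12) framebuffer is scanned out using two
	 * hardware planes: the UV "master" plane plus a linked Y "slave"
	 * plane. The loop below tears down the old links; the second
	 * loop rebuilds them from crtc_state->nv12_planes.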
11695 */ 11696 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11697 if (plane->pipe != crtc->pipe || !plane_state->linked_plane) 11698 continue; 11699 11700 plane_state->linked_plane = NULL; 11701 if (plane_state->slave && !plane_state->base.visible) { 11702 crtc_state->active_planes &= ~BIT(plane->id); 11703 crtc_state->update_planes |= BIT(plane->id); 11704 } 11705 11706 plane_state->slave = false; 11707 } 11708 11709 if (!crtc_state->nv12_planes) 11710 return 0; 11711 11712 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11713 struct intel_plane_state *linked_state = NULL; 11714 11715 if (plane->pipe != crtc->pipe || 11716 !(crtc_state->nv12_planes & BIT(plane->id))) 11717 continue; 11718 11719 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 11720 if (!icl_is_nv12_y_plane(linked->id)) 11721 continue; 11722 11723 if (crtc_state->active_planes & BIT(linked->id)) 11724 continue; 11725 11726 linked_state = intel_atomic_get_plane_state(state, linked); 11727 if (IS_ERR(linked_state)) 11728 return PTR_ERR(linked_state); 11729 11730 break; 11731 } 11732 11733 if (!linked_state) { 11734 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n", 11735 hweight8(crtc_state->nv12_planes)); 11736 11737 return -EINVAL; 11738 } 11739 11740 plane_state->linked_plane = linked; 11741 11742 linked_state->slave = true; 11743 linked_state->linked_plane = plane; 11744 crtc_state->active_planes |= BIT(linked->id); 11745 crtc_state->update_planes |= BIT(linked->id); 11746 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); 11747 } 11748 11749 return 0; 11750 } 11751 11752 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 11753 { 11754 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 11755 struct intel_atomic_state *state = 11756 to_intel_atomic_state(new_crtc_state->base.state); 11757 const struct intel_crtc_state *old_crtc_state = 11758 intel_atomic_get_old_crtc_state(state, crtc); 11759 11760 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 11761 } 11762 11763 static int intel_crtc_atomic_check(struct drm_crtc *crtc, 11764 struct drm_crtc_state *crtc_state) 11765 { 11766 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 11767 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11768 struct intel_crtc_state *pipe_config = 11769 to_intel_crtc_state(crtc_state); 11770 int ret; 11771 bool mode_changed = needs_modeset(pipe_config); 11772 11773 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && 11774 mode_changed && !crtc_state->active) 11775 pipe_config->update_wm_post = true; 11776 11777 if (mode_changed && crtc_state->enable && 11778 dev_priv->display.crtc_compute_clock && 11779 !WARN_ON(pipe_config->shared_dpll)) { 11780 ret = dev_priv->display.crtc_compute_clock(intel_crtc, 11781 pipe_config); 11782 if (ret) 11783 return ret; 11784 } 11785 11786 /* 11787 * May need to update pipe gamma enable bits 11788 * when C8 planes are getting enabled/disabled. 
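	 * (C8 scanout sources its 256-entry color table from the pipe's
	 * legacy palette, so a C8 plane appearing or disappearing can
	 * change whether pipe gamma has to be enabled.)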
11789 */ 11790 if (c8_planes_changed(pipe_config)) 11791 crtc_state->color_mgmt_changed = true; 11792 11793 if (mode_changed || pipe_config->update_pipe || 11794 crtc_state->color_mgmt_changed) { 11795 ret = intel_color_check(pipe_config); 11796 if (ret) 11797 return ret; 11798 } 11799 11800 ret = 0; 11801 if (dev_priv->display.compute_pipe_wm) { 11802 ret = dev_priv->display.compute_pipe_wm(pipe_config); 11803 if (ret) { 11804 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); 11805 return ret; 11806 } 11807 } 11808 11809 if (dev_priv->display.compute_intermediate_wm) { 11810 if (WARN_ON(!dev_priv->display.compute_pipe_wm)) 11811 return 0; 11812 11813 /* 11814 * Calculate 'intermediate' watermarks that satisfy both the 11815 * old state and the new state. We can program these 11816 * immediately. 11817 */ 11818 ret = dev_priv->display.compute_intermediate_wm(pipe_config); 11819 if (ret) { 11820 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 11821 return ret; 11822 } 11823 } 11824 11825 if (INTEL_GEN(dev_priv) >= 9) { 11826 if (mode_changed || pipe_config->update_pipe) 11827 ret = skl_update_scaler_crtc(pipe_config); 11828 11829 if (!ret) 11830 ret = icl_check_nv12_planes(pipe_config); 11831 if (!ret) 11832 ret = skl_check_pipe_max_pixel_rate(intel_crtc, 11833 pipe_config); 11834 if (!ret) 11835 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc, 11836 pipe_config); 11837 } 11838 11839 if (HAS_IPS(dev_priv)) 11840 pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config); 11841 11842 return ret; 11843 } 11844 11845 static const struct drm_crtc_helper_funcs intel_helper_funcs = { 11846 .atomic_check = intel_crtc_atomic_check, 11847 }; 11848 11849 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 11850 { 11851 struct intel_connector *connector; 11852 struct drm_connector_list_iter conn_iter; 11853 11854 drm_connector_list_iter_begin(dev, &conn_iter); 11855 for_each_intel_connector_iter(connector, &conn_iter) { 11856 if (connector->base.state->crtc) 11857 drm_connector_put(&connector->base); 11858 11859 if (connector->base.encoder) { 11860 connector->base.state->best_encoder = 11861 connector->base.encoder; 11862 connector->base.state->crtc = 11863 connector->base.encoder->crtc; 11864 11865 drm_connector_get(&connector->base); 11866 } else { 11867 connector->base.state->best_encoder = NULL; 11868 connector->base.state->crtc = NULL; 11869 } 11870 } 11871 drm_connector_list_iter_end(&conn_iter); 11872 } 11873 11874 static int 11875 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 11876 struct intel_crtc_state *pipe_config) 11877 { 11878 struct drm_connector *connector = conn_state->connector; 11879 const struct drm_display_info *info = &connector->display_info; 11880 int bpp; 11881 11882 switch (conn_state->max_bpc) { 11883 case 6 ... 7: 11884 bpp = 6 * 3; 11885 break; 11886 case 8 ... 9: 11887 bpp = 8 * 3; 11888 break; 11889 case 10 ... 
11: 11890 bpp = 10 * 3; 11891 break; 11892 case 12: 11893 bpp = 12 * 3; 11894 break; 11895 default: 11896 return -EINVAL; 11897 } 11898 11899 if (bpp < pipe_config->pipe_bpp) { 11900 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " 11901 "EDID bpp %d, requested bpp %d, max platform bpp %d\n", 11902 connector->base.id, connector->name, 11903 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc, 11904 pipe_config->pipe_bpp); 11905 11906 pipe_config->pipe_bpp = bpp; 11907 } 11908 11909 return 0; 11910 } 11911 11912 static int 11913 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 11914 struct intel_crtc_state *pipe_config) 11915 { 11916 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11917 struct drm_atomic_state *state = pipe_config->base.state; 11918 struct drm_connector *connector; 11919 struct drm_connector_state *connector_state; 11920 int bpp, i; 11921 11922 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 11923 IS_CHERRYVIEW(dev_priv))) 11924 bpp = 10*3; 11925 else if (INTEL_GEN(dev_priv) >= 5) 11926 bpp = 12*3; 11927 else 11928 bpp = 8*3; 11929 11930 pipe_config->pipe_bpp = bpp; 11931 11932 /* Clamp display bpp to connector max bpp */ 11933 for_each_new_connector_in_state(state, connector, connector_state, i) { 11934 int ret; 11935 11936 if (connector_state->crtc != &crtc->base) 11937 continue; 11938 11939 ret = compute_sink_pipe_bpp(connector_state, pipe_config); 11940 if (ret) 11941 return ret; 11942 } 11943 11944 return 0; 11945 } 11946 11947 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 11948 { 11949 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 11950 "type: 0x%x flags: 0x%x\n", 11951 mode->crtc_clock, 11952 mode->crtc_hdisplay, mode->crtc_hsync_start, 11953 mode->crtc_hsync_end, mode->crtc_htotal, 11954 mode->crtc_vdisplay, mode->crtc_vsync_start, 11955 mode->crtc_vsync_end, mode->crtc_vtotal, 11956 mode->type, mode->flags); 11957 } 11958 11959 static inline void 11960 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, 11961 const char *id, unsigned int lane_count, 11962 const struct intel_link_m_n *m_n) 11963 { 11964 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 11965 id, lane_count, 11966 m_n->gmch_m, m_n->gmch_n, 11967 m_n->link_m, m_n->link_n, m_n->tu); 11968 } 11969 11970 static void 11971 intel_dump_infoframe(struct drm_i915_private *dev_priv, 11972 const union hdmi_infoframe *frame) 11973 { 11974 if ((drm_debug & DRM_UT_KMS) == 0) 11975 return; 11976 11977 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); 11978 } 11979 11980 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x 11981 11982 static const char * const output_type_str[] = { 11983 OUTPUT_TYPE(UNUSED), 11984 OUTPUT_TYPE(ANALOG), 11985 OUTPUT_TYPE(DVO), 11986 OUTPUT_TYPE(SDVO), 11987 OUTPUT_TYPE(LVDS), 11988 OUTPUT_TYPE(TVOUT), 11989 OUTPUT_TYPE(HDMI), 11990 OUTPUT_TYPE(DP), 11991 OUTPUT_TYPE(EDP), 11992 OUTPUT_TYPE(DSI), 11993 OUTPUT_TYPE(DDI), 11994 OUTPUT_TYPE(DP_MST), 11995 }; 11996 11997 #undef OUTPUT_TYPE 11998 11999 static void snprintf_output_types(char *buf, size_t len, 12000 unsigned int output_types) 12001 { 12002 char *str = buf; 12003 int i; 12004 12005 str[0] = '\0'; 12006 12007 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) { 12008 int r; 12009 12010 if ((output_types & BIT(i)) == 0) 12011 continue; 12012 12013 r = snprintf(str, len, "%s%s", 12014 str != buf ? 
"," : "", output_type_str[i]); 12015 if (r >= len) 12016 break; 12017 str += r; 12018 len -= r; 12019 12020 output_types &= ~BIT(i); 12021 } 12022 12023 WARN_ON_ONCE(output_types != 0); 12024 } 12025 12026 static const char * const output_format_str[] = { 12027 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", 12028 [INTEL_OUTPUT_FORMAT_RGB] = "RGB", 12029 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", 12030 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", 12031 }; 12032 12033 static const char *output_formats(enum intel_output_format format) 12034 { 12035 if (format >= ARRAY_SIZE(output_format_str)) 12036 format = INTEL_OUTPUT_FORMAT_INVALID; 12037 return output_format_str[format]; 12038 } 12039 12040 static void intel_dump_plane_state(const struct intel_plane_state *plane_state) 12041 { 12042 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 12043 const struct drm_framebuffer *fb = plane_state->base.fb; 12044 struct drm_format_name_buf format_name; 12045 12046 if (!fb) { 12047 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n", 12048 plane->base.base.id, plane->base.name, 12049 yesno(plane_state->base.visible)); 12050 return; 12051 } 12052 12053 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n", 12054 plane->base.base.id, plane->base.name, 12055 fb->base.id, fb->width, fb->height, 12056 drm_get_format_name(fb->format->format, &format_name), 12057 yesno(plane_state->base.visible)); 12058 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n", 12059 plane_state->base.rotation, plane_state->scaler_id); 12060 if (plane_state->base.visible) 12061 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", 12062 DRM_RECT_FP_ARG(&plane_state->base.src), 12063 DRM_RECT_ARG(&plane_state->base.dst)); 12064 } 12065 12066 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, 12067 struct intel_atomic_state *state, 12068 const char *context) 12069 { 12070 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); 12071 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12072 const struct intel_plane_state *plane_state; 12073 struct intel_plane *plane; 12074 char buf[64]; 12075 int i; 12076 12077 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n", 12078 crtc->base.base.id, crtc->base.name, 12079 yesno(pipe_config->base.enable), context); 12080 12081 if (!pipe_config->base.enable) 12082 goto dump_planes; 12083 12084 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); 12085 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n", 12086 yesno(pipe_config->base.active), 12087 buf, pipe_config->output_types, 12088 output_formats(pipe_config->output_format)); 12089 12090 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 12091 transcoder_name(pipe_config->cpu_transcoder), 12092 pipe_config->pipe_bpp, pipe_config->dither); 12093 12094 if (pipe_config->has_pch_encoder) 12095 intel_dump_m_n_config(pipe_config, "fdi", 12096 pipe_config->fdi_lanes, 12097 &pipe_config->fdi_m_n); 12098 12099 if (intel_crtc_has_dp_encoder(pipe_config)) { 12100 intel_dump_m_n_config(pipe_config, "dp m_n", 12101 pipe_config->lane_count, &pipe_config->dp_m_n); 12102 if (pipe_config->has_drrs) 12103 intel_dump_m_n_config(pipe_config, "dp m2_n2", 12104 pipe_config->lane_count, 12105 &pipe_config->dp_m2_n2); 12106 } 12107 12108 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", 12109 pipe_config->has_audio, pipe_config->has_infoframe, 12110 pipe_config->infoframes.enable); 12111 12112 if 
(pipe_config->infoframes.enable & 12113 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) 12114 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp); 12115 if (pipe_config->infoframes.enable & 12116 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) 12117 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); 12118 if (pipe_config->infoframes.enable & 12119 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) 12120 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd); 12121 if (pipe_config->infoframes.enable & 12122 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) 12123 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); 12124 12125 DRM_DEBUG_KMS("requested mode:\n"); 12126 drm_mode_debug_printmodeline(&pipe_config->base.mode); 12127 DRM_DEBUG_KMS("adjusted mode:\n"); 12128 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode); 12129 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode); 12130 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n", 12131 pipe_config->port_clock, 12132 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 12133 pipe_config->pixel_rate); 12134 12135 if (INTEL_GEN(dev_priv) >= 9) 12136 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 12137 crtc->num_scalers, 12138 pipe_config->scaler_state.scaler_users, 12139 pipe_config->scaler_state.scaler_id); 12140 12141 if (HAS_GMCH(dev_priv)) 12142 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 12143 pipe_config->gmch_pfit.control, 12144 pipe_config->gmch_pfit.pgm_ratios, 12145 pipe_config->gmch_pfit.lvds_border_bits); 12146 else 12147 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n", 12148 pipe_config->pch_pfit.pos, 12149 pipe_config->pch_pfit.size, 12150 enableddisabled(pipe_config->pch_pfit.enabled), 12151 yesno(pipe_config->pch_pfit.force_thru)); 12152 12153 DRM_DEBUG_KMS("ips: %i, double wide: %i\n", 12154 pipe_config->ips_enabled, pipe_config->double_wide); 12155 12156 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); 12157 12158 dump_planes: 12159 if (!state) 12160 return; 12161 12162 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12163 if (plane->pipe == crtc->pipe) 12164 intel_dump_plane_state(plane_state); 12165 } 12166 } 12167 12168 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 12169 { 12170 struct drm_device *dev = state->base.dev; 12171 struct drm_connector *connector; 12172 struct drm_connector_list_iter conn_iter; 12173 unsigned int used_ports = 0; 12174 unsigned int used_mst_ports = 0; 12175 bool ret = true; 12176 12177 /* 12178 * Walk the connector list instead of the encoder 12179 * list to detect the problem on ddi platforms 12180 * where there's just one encoder per digital port. 
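	 * With a single encoder per port, two enabled connectors can
	 * still end up driving the same port; the used_ports mask
	 * built below catches exactly that case.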
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |= 1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}

static int
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_state *saved_state;

	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memcpy(&crtc_state->base + 1, &saved_state->base + 1,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	kfree(saved_state);
	return 0;
}

static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	bool retry = true;

	ret = clear_intel_crtc_state(pipe_config);
	if (ret)
		return ret;

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones.
	 * If neither positive nor negative polarity is requested,
	 * treat this as meaning negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frames. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode.
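	 * (Worked example with illustrative numbers: an adjusted mode
	 * clock of 65000 kHz and pixel_multiplier == 2 yields
	 * port_clock = 130000 kHz.)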
	 */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems not to pass bits through correctly when it should, so
	 * only enable it on 6bpc panels and when it's not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}

bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	if (((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105)
		return true;

	return false;
}

static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	if (exact || !m || !n || !m2 || !n2)
		return false;

	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}

static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       const struct intel_link_m_n *m2_n2,
		       bool exact)
{
	return m_n->tu == m2_n2->tu &&
		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
		intel_compare_m_n(m_n->link_m, m_n->link_n,
				  m2_n2->link_m, m2_n2->link_n, exact);
}

static bool
intel_compare_infoframe(const union hdmi_infoframe *a,
			const union hdmi_infoframe *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		if ((drm_debug & DRM_UT_KMS) == 0)
			return;

		drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name);
		drm_dbg(DRM_UT_KMS, "expected:");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg(DRM_UT_KMS, "found");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err("mismatch in %s infoframe", name);
		drm_err("expected:");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err("found");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}

static void __printf(3, 4)
pipe_config_mismatch(bool fastset, const char *name, const char
*format, ...) 12500 { 12501 struct va_format vaf; 12502 va_list args; 12503 12504 va_start(args, format); 12505 vaf.fmt = format; 12506 vaf.va = &args; 12507 12508 if (fastset) 12509 drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf); 12510 else 12511 drm_err("mismatch in %s %pV", name, &vaf); 12512 12513 va_end(args); 12514 } 12515 12516 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 12517 { 12518 if (i915_modparams.fastboot != -1) 12519 return i915_modparams.fastboot; 12520 12521 /* Enable fastboot by default on Skylake and newer */ 12522 if (INTEL_GEN(dev_priv) >= 9) 12523 return true; 12524 12525 /* Enable fastboot by default on VLV and CHV */ 12526 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 12527 return true; 12528 12529 /* Disabled by default on all others */ 12530 return false; 12531 } 12532 12533 static bool 12534 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 12535 const struct intel_crtc_state *pipe_config, 12536 bool fastset) 12537 { 12538 struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev); 12539 bool ret = true; 12540 bool fixup_inherited = fastset && 12541 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) && 12542 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED); 12543 12544 if (fixup_inherited && !fastboot_enabled(dev_priv)) { 12545 DRM_DEBUG_KMS("initial modeset and fastboot not set\n"); 12546 ret = false; 12547 } 12548 12549 #define PIPE_CONF_CHECK_X(name) do { \ 12550 if (current_config->name != pipe_config->name) { \ 12551 pipe_config_mismatch(fastset, __stringify(name), \ 12552 "(expected 0x%08x, found 0x%08x)\n", \ 12553 current_config->name, \ 12554 pipe_config->name); \ 12555 ret = false; \ 12556 } \ 12557 } while (0) 12558 12559 #define PIPE_CONF_CHECK_I(name) do { \ 12560 if (current_config->name != pipe_config->name) { \ 12561 pipe_config_mismatch(fastset, __stringify(name), \ 12562 "(expected %i, found %i)\n", \ 12563 current_config->name, \ 12564 pipe_config->name); \ 12565 ret = false; \ 12566 } \ 12567 } while (0) 12568 12569 #define PIPE_CONF_CHECK_BOOL(name) do { \ 12570 if (current_config->name != pipe_config->name) { \ 12571 pipe_config_mismatch(fastset, __stringify(name), \ 12572 "(expected %s, found %s)\n", \ 12573 yesno(current_config->name), \ 12574 yesno(pipe_config->name)); \ 12575 ret = false; \ 12576 } \ 12577 } while (0) 12578 12579 /* 12580 * Checks state where we only read out the enabling, but not the entire 12581 * state itself (like full infoframes or ELD for audio). These states 12582 * require a full modeset on bootup to fix up. 
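	 * has_audio is the typical example: readout only tells us that
	 * audio is enabled, not which ELD was programmed, so a mismatch
	 * on an inherited state forces a modeset instead of a fastset.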
	 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected %p, found %p)\n", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, \
				    !fastset)) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)\n", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, !fastset) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, !fastset)) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "or tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)\n", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     current_config->alt_name.tu, \
				     current_config->alt_name.gmch_m, \
				     current_config->alt_name.gmch_n, \
				     current_config->alt_name.link_m, \
				     current_config->alt_name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(%x) (expected %i, found %i)\n", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected %i, found %i)\n", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
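	 * (Note that force_thru is therefore compared even for fastsets,
	 * unlike the rest of the pch_pfit state, which is only checked
	 * in the !fastset branch below.)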
12763 */ 12764 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 12765 12766 if (!fastset) { 12767 PIPE_CONF_CHECK_I(pipe_src_w); 12768 PIPE_CONF_CHECK_I(pipe_src_h); 12769 12770 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 12771 if (current_config->pch_pfit.enabled) { 12772 PIPE_CONF_CHECK_X(pch_pfit.pos); 12773 PIPE_CONF_CHECK_X(pch_pfit.size); 12774 } 12775 12776 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 12777 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 12778 12779 PIPE_CONF_CHECK_X(gamma_mode); 12780 if (IS_CHERRYVIEW(dev_priv)) 12781 PIPE_CONF_CHECK_X(cgm_mode); 12782 else 12783 PIPE_CONF_CHECK_X(csc_mode); 12784 PIPE_CONF_CHECK_BOOL(gamma_enable); 12785 PIPE_CONF_CHECK_BOOL(csc_enable); 12786 } 12787 12788 PIPE_CONF_CHECK_BOOL(double_wide); 12789 12790 PIPE_CONF_CHECK_P(shared_dpll); 12791 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 12792 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 12793 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 12794 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 12795 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 12796 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 12797 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 12798 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 12799 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 12800 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 12801 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 12802 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 12803 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 12804 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 12805 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 12806 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 12807 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 12808 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 12809 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 12810 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 12811 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 12812 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 12813 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 12814 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 12815 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 12816 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 12817 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 12818 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 12819 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 12820 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 12821 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 12822 12823 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 12824 PIPE_CONF_CHECK_X(dsi_pll.div); 12825 12826 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 12827 PIPE_CONF_CHECK_I(pipe_bpp); 12828 12829 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); 12830 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 12831 12832 PIPE_CONF_CHECK_I(min_voltage_level); 12833 12834 PIPE_CONF_CHECK_X(infoframes.enable); 12835 PIPE_CONF_CHECK_X(infoframes.gcp); 12836 PIPE_CONF_CHECK_INFOFRAME(avi); 12837 PIPE_CONF_CHECK_INFOFRAME(spd); 12838 PIPE_CONF_CHECK_INFOFRAME(hdmi); 12839 PIPE_CONF_CHECK_INFOFRAME(drm); 12840 12841 #undef PIPE_CONF_CHECK_X 12842 #undef PIPE_CONF_CHECK_I 12843 #undef PIPE_CONF_CHECK_BOOL 12844 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 12845 #undef PIPE_CONF_CHECK_P 12846 #undef PIPE_CONF_CHECK_FLAGS 12847 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 12848 #undef PIPE_CONF_QUIRK 12849 12850 return ret; 12851 } 12852 12853 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 12854 const struct intel_crtc_state *pipe_config) 12855 { 12856 if (pipe_config->has_pch_encoder) { 12857 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 12858 &pipe_config->fdi_m_n); 12859 int dotclock = 
pipe_config->base.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees.
		 */
		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
		     fdi_dotclock, dotclock);
	}
}

static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_ddb_allocation ddb;
		struct skl_pipe_wm wm;
	} *hw;
	struct skl_ddb_allocation *sw_ddb;
	struct skl_pipe_wm *sw_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	const enum pipe pipe = crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
	sw_wm = &new_crtc_state->wm.skl.optimal;

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
			  sw_ddb->enabled_slices,
			  hw->ddb.enabled_slices);

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation.
In that case since the ddb allocation will be updated 12959 * once the plane becomes visible, we can skip this check 12960 */ 12961 if (1) { 12962 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 12963 12964 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR]; 12965 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; 12966 12967 /* Watermarks */ 12968 for (level = 0; level <= max_level; level++) { 12969 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 12970 &sw_plane_wm->wm[level])) 12971 continue; 12972 12973 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 12974 pipe_name(pipe), level, 12975 sw_plane_wm->wm[level].plane_en, 12976 sw_plane_wm->wm[level].plane_res_b, 12977 sw_plane_wm->wm[level].plane_res_l, 12978 hw_plane_wm->wm[level].plane_en, 12979 hw_plane_wm->wm[level].plane_res_b, 12980 hw_plane_wm->wm[level].plane_res_l); 12981 } 12982 12983 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 12984 &sw_plane_wm->trans_wm)) { 12985 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 12986 pipe_name(pipe), 12987 sw_plane_wm->trans_wm.plane_en, 12988 sw_plane_wm->trans_wm.plane_res_b, 12989 sw_plane_wm->trans_wm.plane_res_l, 12990 hw_plane_wm->trans_wm.plane_en, 12991 hw_plane_wm->trans_wm.plane_res_b, 12992 hw_plane_wm->trans_wm.plane_res_l); 12993 } 12994 12995 /* DDB */ 12996 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; 12997 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; 12998 12999 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 13000 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", 13001 pipe_name(pipe), 13002 sw_ddb_entry->start, sw_ddb_entry->end, 13003 hw_ddb_entry->start, hw_ddb_entry->end); 13004 } 13005 } 13006 13007 kfree(hw); 13008 } 13009 13010 static void 13011 verify_connector_state(struct intel_atomic_state *state, 13012 struct intel_crtc *crtc) 13013 { 13014 struct drm_connector *connector; 13015 struct drm_connector_state *new_conn_state; 13016 int i; 13017 13018 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { 13019 struct drm_encoder *encoder = connector->encoder; 13020 struct intel_crtc_state *crtc_state = NULL; 13021 13022 if (new_conn_state->crtc != &crtc->base) 13023 continue; 13024 13025 if (crtc) 13026 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 13027 13028 intel_connector_verify_state(crtc_state, new_conn_state); 13029 13030 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 13031 "connector's atomic encoder doesn't match legacy encoder\n"); 13032 } 13033 } 13034 13035 static void 13036 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) 13037 { 13038 struct intel_encoder *encoder; 13039 struct drm_connector *connector; 13040 struct drm_connector_state *old_conn_state, *new_conn_state; 13041 int i; 13042 13043 for_each_intel_encoder(&dev_priv->drm, encoder) { 13044 bool enabled = false, found = false; 13045 enum pipe pipe; 13046 13047 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 13048 encoder->base.base.id, 13049 encoder->base.name); 13050 13051 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, 13052 new_conn_state, i) { 13053 if (old_conn_state->best_encoder == &encoder->base) 13054 found = true; 13055 13056 if (new_conn_state->best_encoder != &encoder->base) 13057 continue; 13058 found = enabled = true; 13059 13060 I915_STATE_WARN(new_conn_state->crtc != 13061 encoder->base.crtc, 13062 "connector's crtc doesn't match 
encoder crtc\n"); 13063 } 13064 13065 if (!found) 13066 continue; 13067 13068 I915_STATE_WARN(!!encoder->base.crtc != enabled, 13069 "encoder's enabled state mismatch " 13070 "(expected %i, found %i)\n", 13071 !!encoder->base.crtc, enabled); 13072 13073 if (!encoder->base.crtc) { 13074 bool active; 13075 13076 active = encoder->get_hw_state(encoder, &pipe); 13077 I915_STATE_WARN(active, 13078 "encoder detached but still enabled on pipe %c.\n", 13079 pipe_name(pipe)); 13080 } 13081 } 13082 } 13083 13084 static void 13085 verify_crtc_state(struct intel_crtc *crtc, 13086 struct intel_crtc_state *old_crtc_state, 13087 struct intel_crtc_state *new_crtc_state) 13088 { 13089 struct drm_device *dev = crtc->base.dev; 13090 struct drm_i915_private *dev_priv = to_i915(dev); 13091 struct intel_encoder *encoder; 13092 struct intel_crtc_state *pipe_config; 13093 struct drm_atomic_state *state; 13094 bool active; 13095 13096 state = old_crtc_state->base.state; 13097 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base); 13098 pipe_config = old_crtc_state; 13099 memset(pipe_config, 0, sizeof(*pipe_config)); 13100 pipe_config->base.crtc = &crtc->base; 13101 pipe_config->base.state = state; 13102 13103 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); 13104 13105 active = dev_priv->display.get_pipe_config(crtc, pipe_config); 13106 13107 /* we keep both pipes enabled on 830 */ 13108 if (IS_I830(dev_priv)) 13109 active = new_crtc_state->base.active; 13110 13111 I915_STATE_WARN(new_crtc_state->base.active != active, 13112 "crtc active state doesn't match with hw state " 13113 "(expected %i, found %i)\n", new_crtc_state->base.active, active); 13114 13115 I915_STATE_WARN(crtc->active != new_crtc_state->base.active, 13116 "transitional active state does not match atomic hw state " 13117 "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active); 13118 13119 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 13120 enum pipe pipe; 13121 13122 active = encoder->get_hw_state(encoder, &pipe); 13123 I915_STATE_WARN(active != new_crtc_state->base.active, 13124 "[ENCODER:%i] active %i with crtc active %i\n", 13125 encoder->base.base.id, active, new_crtc_state->base.active); 13126 13127 I915_STATE_WARN(active && crtc->pipe != pipe, 13128 "Encoder connected to wrong pipe %c\n", 13129 pipe_name(pipe)); 13130 13131 if (active) 13132 encoder->get_config(encoder, pipe_config); 13133 } 13134 13135 intel_crtc_compute_pixel_rate(pipe_config); 13136 13137 if (!new_crtc_state->base.active) 13138 return; 13139 13140 intel_pipe_config_sanity_check(dev_priv, pipe_config); 13141 13142 if (!intel_pipe_config_compare(new_crtc_state, 13143 pipe_config, false)) { 13144 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 13145 intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); 13146 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]"); 13147 } 13148 } 13149 13150 static void 13151 intel_verify_planes(struct intel_atomic_state *state) 13152 { 13153 struct intel_plane *plane; 13154 const struct intel_plane_state *plane_state; 13155 int i; 13156 13157 for_each_new_intel_plane_in_state(state, plane, 13158 plane_state, i) 13159 assert_plane(plane, plane_state->slave || 13160 plane_state->base.visible); 13161 } 13162 13163 static void 13164 verify_single_dpll_state(struct drm_i915_private *dev_priv, 13165 struct intel_shared_dpll *pll, 13166 struct intel_crtc *crtc, 13167 struct intel_crtc_state *new_crtc_state) 13168 { 13169 struct intel_dpll_hw_state dpll_hw_state; 13170 unsigned int 
crtc_mask; 13171 bool active; 13172 13173 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 13174 13175 DRM_DEBUG_KMS("%s\n", pll->info->name); 13176 13177 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state); 13178 13179 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { 13180 I915_STATE_WARN(!pll->on && pll->active_mask, 13181 "pll in active use but not on in sw tracking\n"); 13182 I915_STATE_WARN(pll->on && !pll->active_mask, 13183 "pll is on but not used by any active crtc\n"); 13184 I915_STATE_WARN(pll->on != active, 13185 "pll on state mismatch (expected %i, found %i)\n", 13186 pll->on, active); 13187 } 13188 13189 if (!crtc) { 13190 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, 13191 "more active pll users than references: %x vs %x\n", 13192 pll->active_mask, pll->state.crtc_mask); 13193 13194 return; 13195 } 13196 13197 crtc_mask = drm_crtc_mask(&crtc->base); 13198 13199 if (new_crtc_state->base.active) 13200 I915_STATE_WARN(!(pll->active_mask & crtc_mask), 13201 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", 13202 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 13203 else 13204 I915_STATE_WARN(pll->active_mask & crtc_mask, 13205 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", 13206 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 13207 13208 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), 13209 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", 13210 crtc_mask, pll->state.crtc_mask); 13211 13212 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 13213 &dpll_hw_state, 13214 sizeof(dpll_hw_state)), 13215 "pll hw state mismatch\n"); 13216 } 13217 13218 static void 13219 verify_shared_dpll_state(struct intel_crtc *crtc, 13220 struct intel_crtc_state *old_crtc_state, 13221 struct intel_crtc_state *new_crtc_state) 13222 { 13223 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13224 13225 if (new_crtc_state->shared_dpll) 13226 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state); 13227 13228 if (old_crtc_state->shared_dpll && 13229 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { 13230 unsigned int crtc_mask = drm_crtc_mask(&crtc->base); 13231 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; 13232 13233 I915_STATE_WARN(pll->active_mask & crtc_mask, 13234 "pll active mismatch (didn't expect pipe %c in active mask)\n", 13235 pipe_name(drm_crtc_index(&crtc->base))); 13236 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, 13237 "pll enabled crtcs mismatch (found %x in enabled mask)\n", 13238 pipe_name(drm_crtc_index(&crtc->base))); 13239 } 13240 } 13241 13242 static void 13243 intel_modeset_verify_crtc(struct intel_crtc *crtc, 13244 struct intel_atomic_state *state, 13245 struct intel_crtc_state *old_crtc_state, 13246 struct intel_crtc_state *new_crtc_state) 13247 { 13248 if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) 13249 return; 13250 13251 verify_wm_state(crtc, new_crtc_state); 13252 verify_connector_state(state, crtc); 13253 verify_crtc_state(crtc, old_crtc_state, new_crtc_state); 13254 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state); 13255 } 13256 13257 static void 13258 verify_disabled_dpll_state(struct drm_i915_private *dev_priv) 13259 { 13260 int i; 13261 13262 for (i = 0; i < dev_priv->num_shared_dpll; i++) 13263 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); 13264 } 13265 13266 static void 13267 intel_modeset_verify_disabled(struct 
drm_i915_private *dev_priv, 13268 struct intel_atomic_state *state) 13269 { 13270 verify_encoder_state(dev_priv, state); 13271 verify_connector_state(state, NULL); 13272 verify_disabled_dpll_state(dev_priv); 13273 } 13274 13275 static void update_scanline_offset(const struct intel_crtc_state *crtc_state) 13276 { 13277 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 13278 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13279 13280 /* 13281 * The scanline counter increments at the leading edge of hsync. 13282 * 13283 * On most platforms it starts counting from vtotal-1 on the 13284 * first active line. That means the scanline counter value is 13285 * always one less than what we would expect. Ie. just after 13286 * start of vblank, which also occurs at start of hsync (on the 13287 * last active line), the scanline counter will read vblank_start-1. 13288 * 13289 * On gen2 the scanline counter starts counting from 1 instead 13290 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 13291 * to keep the value positive), instead of adding one. 13292 * 13293 * On HSW+ the behaviour of the scanline counter depends on the output 13294 * type. For DP ports it behaves like most other platforms, but on HDMI 13295 * there's an extra 1 line difference. So we need to add two instead of 13296 * one to the value. 13297 * 13298 * On VLV/CHV DSI the scanline counter would appear to increment 13299 * approx. 1/3 of a scanline before start of vblank. Unfortunately 13300 * that means we can't tell whether we're in vblank or not while 13301 * we're on that particular line. We must still set scanline_offset 13302 * to 1 so that the vblank timestamps come out correct when we query 13303 * the scanline counter from within the vblank interrupt handler. 13304 * However if queried just before the start of vblank we'll get an 13305 * answer that's slightly in the future. 13306 */ 13307 if (IS_GEN(dev_priv, 2)) { 13308 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; 13309 int vtotal; 13310 13311 vtotal = adjusted_mode->crtc_vtotal; 13312 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 13313 vtotal /= 2; 13314 13315 crtc->scanline_offset = vtotal - 1; 13316 } else if (HAS_DDI(dev_priv) && 13317 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 13318 crtc->scanline_offset = 2; 13319 } else 13320 crtc->scanline_offset = 1; 13321 } 13322 13323 static void intel_modeset_clear_plls(struct intel_atomic_state *state) 13324 { 13325 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13326 struct intel_crtc_state *new_crtc_state; 13327 struct intel_crtc *crtc; 13328 int i; 13329 13330 if (!dev_priv->display.crtc_compute_clock) 13331 return; 13332 13333 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 13334 if (!needs_modeset(new_crtc_state)) 13335 continue; 13336 13337 intel_release_shared_dplls(state, crtc); 13338 } 13339 } 13340 13341 /* 13342 * This implements the workaround described in the "notes" section of the mode 13343 * set sequence documentation. When going from no pipes or single pipe to 13344 * multiple pipes, and planes are enabled after the pipe, we need to wait at 13345 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 
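 * E.g. when pipe B is brought up while only pipe A was running, pipe B's
 * hsw_workaround_pipe is pointed at pipe A so the commit code knows which
 * pipe to wait the two vblanks on.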
 */
static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->base.active ||
		    !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->base.active ||
		    needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

static int intel_lock_all_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	/* Add all pipes to the state */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	return 0;
}

static int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	/*
	 * Add all pipes to the state, and force
	 * a modeset on all the active ones.
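	 * Inactive pipes get added purely so their locks are taken; for the
	 * active ones, setting base.mode_changed below makes needs_modeset()
	 * true and pulls their connectors and planes into the state as well.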
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->base.active || needs_modeset(crtc_state))
			continue;

		crtc_state->base.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base,
						     &crtc->base);
		if (ret)
			return ret;
	}

	return 0;
}

static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* keep the current setting */
	if (!state->cdclk.force_min_cdclk_changed)
		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;

	state->modeset = true;
	state->active_crtcs = dev_priv->active_crtcs;
	state->cdclk.logical = dev_priv->cdclk.logical;
	state->cdclk.actual = dev_priv->cdclk.actual;
	state->cdclk.pipe = INVALID_PIPE;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->base.active)
			state->active_crtcs |= 1 << i;
		else
			state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->base.active != new_crtc_state->base.active)
			state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		enum pipe pipe;

		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must be protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		if (is_power_of_2(state->active_crtcs)) {
			struct intel_crtc *crtc;
			struct intel_crtc_state *crtc_state;

			pipe = ilog2(state->active_crtcs);
			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
			if (crtc_state && needs_modeset(crtc_state))
				pipe = INVALID_PIPE;
		} else {
			pipe = INVALID_PIPE;
		}

		/*
		 * All pipes must be switched off while we change the cdclk.
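		 * The exception is a cd2x divider update, which can be
		 * synchronized against the vblank of a single active pipe;
		 * state->cdclk.pipe records which pipe that is, and stays
		 * INVALID_PIPE when a full modeset of all pipes is needed.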
*/ 13528 if (pipe != INVALID_PIPE && 13529 intel_cdclk_needs_cd2x_update(dev_priv, 13530 &dev_priv->cdclk.actual, 13531 &state->cdclk.actual)) { 13532 ret = intel_lock_all_pipes(state); 13533 if (ret < 0) 13534 return ret; 13535 13536 state->cdclk.pipe = pipe; 13537 } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, 13538 &state->cdclk.actual)) { 13539 ret = intel_modeset_all_pipes(state); 13540 if (ret < 0) 13541 return ret; 13542 13543 state->cdclk.pipe = INVALID_PIPE; 13544 } 13545 13546 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", 13547 state->cdclk.logical.cdclk, 13548 state->cdclk.actual.cdclk); 13549 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n", 13550 state->cdclk.logical.voltage_level, 13551 state->cdclk.actual.voltage_level); 13552 } 13553 13554 intel_modeset_clear_plls(state); 13555 13556 if (IS_HASWELL(dev_priv)) 13557 return haswell_mode_set_planes_workaround(state); 13558 13559 return 0; 13560 } 13561 13562 /* 13563 * Handle calculation of various watermark data at the end of the atomic check 13564 * phase. The code here should be run after the per-crtc and per-plane 'check' 13565 * handlers to ensure that all derived state has been updated. 13566 */ 13567 static int calc_watermark_data(struct intel_atomic_state *state) 13568 { 13569 struct drm_device *dev = state->base.dev; 13570 struct drm_i915_private *dev_priv = to_i915(dev); 13571 13572 /* Is there platform-specific watermark information to calculate? */ 13573 if (dev_priv->display.compute_global_watermarks) 13574 return dev_priv->display.compute_global_watermarks(state); 13575 13576 return 0; 13577 } 13578 13579 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 13580 struct intel_crtc_state *new_crtc_state) 13581 { 13582 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 13583 return; 13584 13585 new_crtc_state->base.mode_changed = false; 13586 new_crtc_state->update_pipe = true; 13587 13588 /* 13589 * If we're not doing the full modeset we want to 13590 * keep the current M/N values as they may be 13591 * sufficiently different to the computed values 13592 * to cause problems. 
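	 * has_drrs is carried over as well, keeping the conditional
	 * dp_m2_n2 comparison in intel_pipe_config_compare() consistent
	 * with the preserved M/N values.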
13593 * 13594 * FIXME: should really copy more fuzzy state here 13595 */ 13596 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; 13597 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; 13598 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; 13599 new_crtc_state->has_drrs = old_crtc_state->has_drrs; 13600 } 13601 13602 /** 13603 * intel_atomic_check - validate state object 13604 * @dev: drm device 13605 * @_state: state to validate 13606 */ 13607 static int intel_atomic_check(struct drm_device *dev, 13608 struct drm_atomic_state *_state) 13609 { 13610 struct drm_i915_private *dev_priv = to_i915(dev); 13611 struct intel_atomic_state *state = to_intel_atomic_state(_state); 13612 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13613 struct intel_crtc *crtc; 13614 int ret, i; 13615 bool any_ms = state->cdclk.force_min_cdclk_changed; 13616 13617 /* Catch I915_MODE_FLAG_INHERITED */ 13618 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13619 new_crtc_state, i) { 13620 if (new_crtc_state->base.mode.private_flags != 13621 old_crtc_state->base.mode.private_flags) 13622 new_crtc_state->base.mode_changed = true; 13623 } 13624 13625 ret = drm_atomic_helper_check_modeset(dev, &state->base); 13626 if (ret) 13627 goto fail; 13628 13629 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13630 new_crtc_state, i) { 13631 if (!needs_modeset(new_crtc_state)) 13632 continue; 13633 13634 if (!new_crtc_state->base.enable) { 13635 any_ms = true; 13636 continue; 13637 } 13638 13639 ret = intel_modeset_pipe_config(new_crtc_state); 13640 if (ret) 13641 goto fail; 13642 13643 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 13644 13645 if (needs_modeset(new_crtc_state)) 13646 any_ms = true; 13647 } 13648 13649 ret = drm_dp_mst_atomic_check(&state->base); 13650 if (ret) 13651 goto fail; 13652 13653 if (any_ms) { 13654 ret = intel_modeset_checks(state); 13655 if (ret) 13656 goto fail; 13657 } else { 13658 state->cdclk.logical = dev_priv->cdclk.logical; 13659 } 13660 13661 ret = icl_add_linked_planes(state); 13662 if (ret) 13663 goto fail; 13664 13665 ret = drm_atomic_helper_check_planes(dev, &state->base); 13666 if (ret) 13667 goto fail; 13668 13669 intel_fbc_choose_crtc(dev_priv, state); 13670 ret = calc_watermark_data(state); 13671 if (ret) 13672 goto fail; 13673 13674 ret = intel_bw_atomic_check(state); 13675 if (ret) 13676 goto fail; 13677 13678 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13679 new_crtc_state, i) { 13680 if (!needs_modeset(new_crtc_state) && 13681 !new_crtc_state->update_pipe) 13682 continue; 13683 13684 intel_dump_pipe_config(new_crtc_state, state, 13685 needs_modeset(new_crtc_state) ? 13686 "[modeset]" : "[fastset]"); 13687 } 13688 13689 return 0; 13690 13691 fail: 13692 if (ret == -EDEADLK) 13693 return ret; 13694 13695 /* 13696 * FIXME would probably be nice to know which crtc specifically 13697 * caused the failure, in cases where we can pinpoint it. 
13698 */ 13699 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13700 new_crtc_state, i) 13701 intel_dump_pipe_config(new_crtc_state, state, "[failed]"); 13702 13703 return ret; 13704 } 13705 13706 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 13707 { 13708 return drm_atomic_helper_prepare_planes(state->base.dev, 13709 &state->base); 13710 } 13711 13712 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 13713 { 13714 struct drm_device *dev = crtc->base.dev; 13715 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)]; 13716 13717 if (!vblank->max_vblank_count) 13718 return (u32)drm_crtc_accurate_vblank_count(&crtc->base); 13719 13720 return crtc->base.funcs->get_vblank_counter(&crtc->base); 13721 } 13722 13723 static void intel_update_crtc(struct intel_crtc *crtc, 13724 struct intel_atomic_state *state, 13725 struct intel_crtc_state *old_crtc_state, 13726 struct intel_crtc_state *new_crtc_state) 13727 { 13728 struct drm_device *dev = state->base.dev; 13729 struct drm_i915_private *dev_priv = to_i915(dev); 13730 bool modeset = needs_modeset(new_crtc_state); 13731 struct intel_plane_state *new_plane_state = 13732 intel_atomic_get_new_plane_state(state, 13733 to_intel_plane(crtc->base.primary)); 13734 13735 if (modeset) { 13736 update_scanline_offset(new_crtc_state); 13737 dev_priv->display.crtc_enable(new_crtc_state, state); 13738 13739 /* vblanks work again, re-enable pipe CRC. */ 13740 intel_crtc_enable_pipe_crc(crtc); 13741 } else { 13742 intel_pre_plane_update(old_crtc_state, new_crtc_state); 13743 13744 if (new_crtc_state->update_pipe) 13745 intel_encoders_update_pipe(crtc, new_crtc_state, state); 13746 } 13747 13748 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) 13749 intel_fbc_disable(crtc); 13750 else if (new_plane_state) 13751 intel_fbc_enable(crtc, new_crtc_state, new_plane_state); 13752 13753 intel_begin_crtc_commit(state, crtc); 13754 13755 if (INTEL_GEN(dev_priv) >= 9) 13756 skl_update_planes_on_crtc(state, crtc); 13757 else 13758 i9xx_update_planes_on_crtc(state, crtc); 13759 13760 intel_finish_crtc_commit(state, crtc); 13761 } 13762 13763 static void intel_update_crtcs(struct intel_atomic_state *state) 13764 { 13765 struct intel_crtc *crtc; 13766 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13767 int i; 13768 13769 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 13770 if (!new_crtc_state->base.active) 13771 continue; 13772 13773 intel_update_crtc(crtc, state, old_crtc_state, 13774 new_crtc_state); 13775 } 13776 } 13777 13778 static void skl_update_crtcs(struct intel_atomic_state *state) 13779 { 13780 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13781 struct intel_crtc *crtc; 13782 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13783 unsigned int updated = 0; 13784 bool progress; 13785 enum pipe pipe; 13786 int i; 13787 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 13788 u8 required_slices = state->wm_results.ddb.enabled_slices; 13789 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 13790 13791 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) 13792 /* ignore allocations for crtc's that have been turned off. 
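		 * Their DDB space gets redistributed to the remaining active
		 * pipes by the freshly computed allocations below.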
		 */
		if (new_crtc_state->base.active)
			entries[i] = old_crtc_state->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other in between CRTC updates. Otherwise
	 * we'll cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(&crtc->base);

			pipe = crtc->pipe;

			if (updated & cmask || !new_crtc_state->base.active)
				continue;

			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries,
							INTEL_INFO(dev_priv)->num_pipes, i))
				continue;

			updated |= cmask;
			entries[i] = new_crtc_state->wm.skl.ddb;

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating,
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    !new_crtc_state->base.active_changed &&
			    state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If the 2nd DBuf slice is no longer required, disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}

static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}

static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}

static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);

		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}

static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);
	struct drm_i915_private *i915 = to_i915(state->dev);

	drm_atomic_helper_cleanup_planes(&i915->drm, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);

	intel_atomic_helper_free_state(i915);
}

static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(old_crtc_state, new_crtc_state);

		if (old_crtc_state->base.active) {
			intel_crtc_disable_planes(state, crtc);

			/*
			 * We need to disable pipe CRC before disabling the pipe,
			 * or we race against vblank off.
			 */
			intel_crtc_disable_pipe_crc(crtc);

			dev_priv->display.crtc_disable(old_crtc_state, state);
			crtc->active = false;
			intel_fbc_disable(crtc);
			intel_disable_shared_dpll(old_crtc_state);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			/* FIXME unify this for all platforms */
			if (!new_crtc_state->base.active &&
			    !HAS_GMCH(dev_priv) &&
			    dev_priv->display.initial_watermarks)
				dev_priv->display.initial_watermarks(state,
								     new_crtc_state);
		}
	}

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(dev_priv,
						 &state->cdclk.actual,
						 &dev_priv->cdclk.actual,
						 state->cdclk.pipe);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for the now-disabled pipes here. */
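		/*
		 * A disabled pipe will not emit a real vblank any more, so a
		 * pending pageflip event has to be signalled by hand.
		 */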
		if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->base.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		intel_set_cdclk_post_plane_update(dev_priv,
						  &state->cdclk.actual,
						  &dev_priv->cdclk.actual,
						  state->cdclk.pipe);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->base.active &&
		    !needs_modeset(new_crtc_state) &&
		    (new_crtc_state->base.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state,
							      new_crtc_state);
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(old_crtc_state);

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (state->modeset)
		intel_verify_planes(state);

	if (state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * is executed inline.
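	 * The cleanup here covers drm_atomic_helper_cleanup_planes() and the
	 * final state reference drop; see intel_atomic_cleanup_work().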
For out-of-line asynchronous modesets/flips, 14087 * deferring to a new worker seems overkill, but we would place a 14088 * schedule point (cond_resched()) here anyway to keep latencies 14089 * down. 14090 */ 14091 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 14092 queue_work(system_highpri_wq, &state->base.commit_work); 14093 } 14094 14095 static void intel_atomic_commit_work(struct work_struct *work) 14096 { 14097 struct intel_atomic_state *state = 14098 container_of(work, struct intel_atomic_state, base.commit_work); 14099 14100 intel_atomic_commit_tail(state); 14101 } 14102 14103 static int __i915_sw_fence_call 14104 intel_atomic_commit_ready(struct i915_sw_fence *fence, 14105 enum i915_sw_fence_notify notify) 14106 { 14107 struct intel_atomic_state *state = 14108 container_of(fence, struct intel_atomic_state, commit_ready); 14109 14110 switch (notify) { 14111 case FENCE_COMPLETE: 14112 /* we do blocking waits in the worker, nothing to do here */ 14113 break; 14114 case FENCE_FREE: 14115 { 14116 struct intel_atomic_helper *helper = 14117 &to_i915(state->base.dev)->atomic_helper; 14118 14119 if (llist_add(&state->freed, &helper->free_list)) 14120 schedule_work(&helper->free_work); 14121 break; 14122 } 14123 } 14124 14125 return NOTIFY_DONE; 14126 } 14127 14128 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 14129 { 14130 struct intel_plane_state *old_plane_state, *new_plane_state; 14131 struct intel_plane *plane; 14132 int i; 14133 14134 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 14135 new_plane_state, i) 14136 i915_gem_track_fb(intel_fb_obj(old_plane_state->base.fb), 14137 intel_fb_obj(new_plane_state->base.fb), 14138 plane->frontbuffer_bit); 14139 } 14140 14141 static int intel_atomic_commit(struct drm_device *dev, 14142 struct drm_atomic_state *_state, 14143 bool nonblock) 14144 { 14145 struct intel_atomic_state *state = to_intel_atomic_state(_state); 14146 struct drm_i915_private *dev_priv = to_i915(dev); 14147 int ret = 0; 14148 14149 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 14150 14151 drm_atomic_state_get(&state->base); 14152 i915_sw_fence_init(&state->commit_ready, 14153 intel_atomic_commit_ready); 14154 14155 /* 14156 * The intel_legacy_cursor_update() fast path takes care 14157 * of avoiding the vblank waits for simple cursor 14158 * movement and flips. For cursor on/off and size changes, 14159 * we want to perform the vblank waits so that watermark 14160 * updates happen during the correct frames. Gen9+ have 14161 * double buffered watermarks and so shouldn't need this. 14162 * 14163 * Unset state->legacy_cursor_update before the call to 14164 * drm_atomic_helper_setup_commit() because otherwise 14165 * drm_atomic_helper_wait_for_flip_done() is a noop and 14166 * we get FIFO underruns because we didn't wait 14167 * for vblank. 14168 * 14169 * FIXME doing watermarks and fb cleanup from a vblank worker 14170 * (assuming we had any) would solve these problems. 
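	 * (Such a worker could also let the legacy cursor fast path below
	 * keep its no-wait behaviour without risking FIFO underruns.)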
14171 */ 14172 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) { 14173 struct intel_crtc_state *new_crtc_state; 14174 struct intel_crtc *crtc; 14175 int i; 14176 14177 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 14178 if (new_crtc_state->wm.need_postvbl_update || 14179 new_crtc_state->update_wm_post) 14180 state->base.legacy_cursor_update = false; 14181 } 14182 14183 ret = intel_atomic_prepare_commit(state); 14184 if (ret) { 14185 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); 14186 i915_sw_fence_commit(&state->commit_ready); 14187 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 14188 return ret; 14189 } 14190 14191 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 14192 if (!ret) 14193 ret = drm_atomic_helper_swap_state(&state->base, true); 14194 14195 if (ret) { 14196 i915_sw_fence_commit(&state->commit_ready); 14197 14198 drm_atomic_helper_cleanup_planes(dev, &state->base); 14199 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 14200 return ret; 14201 } 14202 dev_priv->wm.distrust_bios_wm = false; 14203 intel_shared_dpll_swap_state(state); 14204 intel_atomic_track_fbs(state); 14205 14206 if (state->modeset) { 14207 memcpy(dev_priv->min_cdclk, state->min_cdclk, 14208 sizeof(state->min_cdclk)); 14209 memcpy(dev_priv->min_voltage_level, state->min_voltage_level, 14210 sizeof(state->min_voltage_level)); 14211 dev_priv->active_crtcs = state->active_crtcs; 14212 dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; 14213 14214 intel_cdclk_swap_state(state); 14215 } 14216 14217 drm_atomic_state_get(&state->base); 14218 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 14219 14220 i915_sw_fence_commit(&state->commit_ready); 14221 if (nonblock && state->modeset) { 14222 queue_work(dev_priv->modeset_wq, &state->base.commit_work); 14223 } else if (nonblock) { 14224 queue_work(system_unbound_wq, &state->base.commit_work); 14225 } else { 14226 if (state->modeset) 14227 flush_workqueue(dev_priv->modeset_wq); 14228 intel_atomic_commit_tail(state); 14229 } 14230 14231 return 0; 14232 } 14233 14234 struct wait_rps_boost { 14235 struct wait_queue_entry wait; 14236 14237 struct drm_crtc *crtc; 14238 struct i915_request *request; 14239 }; 14240 14241 static int do_rps_boost(struct wait_queue_entry *_wait, 14242 unsigned mode, int sync, void *key) 14243 { 14244 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 14245 struct i915_request *rq = wait->request; 14246 14247 /* 14248 * If we missed the vblank, but the request is already running it 14249 * is reasonable to assume that it will complete before the next 14250 * vblank without our intervention, so leave RPS alone. 
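	 * Only a request that has not yet started by the time the vblank
	 * fires gets the frequency bump below.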
14251 */ 14252 if (!i915_request_started(rq)) 14253 gen6_rps_boost(rq); 14254 i915_request_put(rq); 14255 14256 drm_crtc_vblank_put(wait->crtc); 14257 14258 list_del(&wait->wait.entry); 14259 kfree(wait); 14260 return 1; 14261 } 14262 14263 static void add_rps_boost_after_vblank(struct drm_crtc *crtc, 14264 struct dma_fence *fence) 14265 { 14266 struct wait_rps_boost *wait; 14267 14268 if (!dma_fence_is_i915(fence)) 14269 return; 14270 14271 if (INTEL_GEN(to_i915(crtc->dev)) < 6) 14272 return; 14273 14274 if (drm_crtc_vblank_get(crtc)) 14275 return; 14276 14277 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 14278 if (!wait) { 14279 drm_crtc_vblank_put(crtc); 14280 return; 14281 } 14282 14283 wait->request = to_request(dma_fence_get(fence)); 14284 wait->crtc = crtc; 14285 14286 wait->wait.func = do_rps_boost; 14287 wait->wait.flags = 0; 14288 14289 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 14290 } 14291 14292 static int intel_plane_pin_fb(struct intel_plane_state *plane_state) 14293 { 14294 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 14295 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 14296 struct drm_framebuffer *fb = plane_state->base.fb; 14297 struct i915_vma *vma; 14298 14299 if (plane->id == PLANE_CURSOR && 14300 INTEL_INFO(dev_priv)->display.cursor_needs_physical) { 14301 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 14302 const int align = intel_cursor_alignment(dev_priv); 14303 int err; 14304 14305 err = i915_gem_object_attach_phys(obj, align); 14306 if (err) 14307 return err; 14308 } 14309 14310 vma = intel_pin_and_fence_fb_obj(fb, 14311 &plane_state->view, 14312 intel_plane_uses_fence(plane_state), 14313 &plane_state->flags); 14314 if (IS_ERR(vma)) 14315 return PTR_ERR(vma); 14316 14317 plane_state->vma = vma; 14318 14319 return 0; 14320 } 14321 14322 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 14323 { 14324 struct i915_vma *vma; 14325 14326 vma = fetch_and_zero(&old_plane_state->vma); 14327 if (vma) 14328 intel_unpin_fb_vma(vma, old_plane_state->flags); 14329 } 14330 14331 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) 14332 { 14333 struct i915_sched_attr attr = { 14334 .priority = I915_PRIORITY_DISPLAY, 14335 }; 14336 14337 i915_gem_object_wait_priority(obj, 0, &attr); 14338 } 14339 14340 /** 14341 * intel_prepare_plane_fb - Prepare fb for usage on plane 14342 * @plane: drm plane to prepare for 14343 * @new_state: the plane state being prepared 14344 * 14345 * Prepares a framebuffer for usage on a display plane. Generally this 14346 * involves pinning the underlying object and updating the frontbuffer tracking 14347 * bits. Some older platforms need special physical address handling for 14348 * cursor planes. 14349 * 14350 * Must be called with struct_mutex held. 14351 * 14352 * Returns 0 on success, negative error code on failure. 
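 * The commit is also attached to the relevant fences here: the
 * framebuffer's explicit fence when userspace supplies one, otherwise
 * the object's implicit reservation fences.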
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (old_obj) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(intel_state,
							to_intel_crtc(plane->state->crtc));

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * pointing to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->base.resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_state->fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_state->fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret) {
		i915_gem_object_unpin_pages(obj);
		return ret;
	}

	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));

	mutex_unlock(&dev_priv->drm.struct_mutex);
	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	fb_obj_bump_render_priority(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	if (!new_state->fence) { /* implicit fencing */
		struct dma_fence *fence;

		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->base.resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		fence = reservation_object_get_excl_rcu(obj->base.resv);
		if (fence) {
			add_rps_boost_after_vblank(new_state->crtc, fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
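	 * The interactive hint is dropped again in intel_cleanup_plane_fb()
	 * once the flip has been committed.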
14450 */ 14451 if (!intel_state->rps_interactive) { 14452 intel_rps_mark_interactive(dev_priv, true); 14453 intel_state->rps_interactive = true; 14454 } 14455 14456 return 0; 14457 } 14458 14459 /** 14460 * intel_cleanup_plane_fb - Cleans up an fb after plane use 14461 * @plane: drm plane to clean up for 14462 * @old_state: the state from the previous modeset 14463 * 14464 * Cleans up a framebuffer that has just been removed from a plane. 14465 * 14466 * Must be called with struct_mutex held. 14467 */ 14468 void 14469 intel_cleanup_plane_fb(struct drm_plane *plane, 14470 struct drm_plane_state *old_state) 14471 { 14472 struct intel_atomic_state *intel_state = 14473 to_intel_atomic_state(old_state->state); 14474 struct drm_i915_private *dev_priv = to_i915(plane->dev); 14475 14476 if (intel_state->rps_interactive) { 14477 intel_rps_mark_interactive(dev_priv, false); 14478 intel_state->rps_interactive = false; 14479 } 14480 14481 /* Should only be called after a successful intel_prepare_plane_fb()! */ 14482 mutex_lock(&dev_priv->drm.struct_mutex); 14483 intel_plane_unpin_fb(to_intel_plane_state(old_state)); 14484 mutex_unlock(&dev_priv->drm.struct_mutex); 14485 } 14486 14487 int 14488 skl_max_scale(const struct intel_crtc_state *crtc_state, 14489 u32 pixel_format) 14490 { 14491 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 14492 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14493 int max_scale, mult; 14494 int crtc_clock, max_dotclk, tmpclk1, tmpclk2; 14495 14496 if (!crtc_state->base.enable) 14497 return DRM_PLANE_HELPER_NO_SCALING; 14498 14499 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; 14500 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk; 14501 14502 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) 14503 max_dotclk *= 2; 14504 14505 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock)) 14506 return DRM_PLANE_HELPER_NO_SCALING; 14507 14508 /* 14509 * The skl max scale factor is the lower of: 14510 * the per-format hard limit, just under 3x (2x for planar YUV; the -1 keeps the .16 fixed-point value below the limit) 14511 * or 14512 * cdclk/crtc_clock 14513 */
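/* Worked example (illustrative numbers, not taken from the code): for a non-planar format (mult = 3), max_dotclk = 540000 kHz and crtc_clock = 148500 kHz give tmpclk2 = ((540000 << 8) / 148500) << 8 = 238080, i.e. ~3.63x in .16 fixed point, while tmpclk1 = 3 * (1 << 16) - 1 = 196607, i.e. ~2.99998x, so the just-under-3x format limit is what caps max_scale here. */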
14514 mult = is_planar_yuv_format(pixel_format) ? 2 : 3; 14515 tmpclk1 = (1 << 16) * mult - 1; 14516 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock); 14517 max_scale = min(tmpclk1, tmpclk2); 14518 14519 return max_scale; 14520 } 14521 14522 static void intel_begin_crtc_commit(struct intel_atomic_state *state, 14523 struct intel_crtc *crtc) 14524 { 14525 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14526 struct intel_crtc_state *old_crtc_state = 14527 intel_atomic_get_old_crtc_state(state, crtc); 14528 struct intel_crtc_state *new_crtc_state = 14529 intel_atomic_get_new_crtc_state(state, crtc); 14530 bool modeset = needs_modeset(new_crtc_state); 14531 14532 /* Perform vblank evasion around commit operation */ 14533 intel_pipe_update_start(new_crtc_state); 14534 14535 if (modeset) 14536 goto out; 14537 14538 if (new_crtc_state->base.color_mgmt_changed || 14539 new_crtc_state->update_pipe) 14540 intel_color_commit(new_crtc_state); 14541 14542 if (new_crtc_state->update_pipe) 14543 intel_update_pipe_config(old_crtc_state, new_crtc_state); 14544 else if (INTEL_GEN(dev_priv) >= 9) 14545 skl_detach_scalers(new_crtc_state); 14546 14547 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 14548 bdw_set_pipemisc(new_crtc_state); 14549 14550 out: 14551 if (dev_priv->display.atomic_update_watermarks) 14552 dev_priv->display.atomic_update_watermarks(state, 14553 new_crtc_state); 14554 } 14555 14556 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 14557 struct intel_crtc_state *crtc_state) 14558 { 14559 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14560 14561 if (!IS_GEN(dev_priv, 2)) 14562 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 14563 14564 if (crtc_state->has_pch_encoder) { 14565 enum pipe pch_transcoder = 14566 intel_crtc_pch_transcoder(crtc); 14567 14568 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 14569 } 14570 } 14571 14572 static void intel_finish_crtc_commit(struct intel_atomic_state *state, 14573 struct intel_crtc *crtc) 14574 { 14575 struct intel_crtc_state *old_crtc_state = 14576 intel_atomic_get_old_crtc_state(state, crtc); 14577 struct intel_crtc_state *new_crtc_state = 14578 intel_atomic_get_new_crtc_state(state, crtc); 14579 14580 intel_pipe_update_end(new_crtc_state); 14581 14582 if (new_crtc_state->update_pipe && 14583 !needs_modeset(new_crtc_state) && 14584 old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) 14585 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 14586 } 14587 14588 /** 14589 * intel_plane_destroy - destroy a plane 14590 * @plane: plane to destroy 14591 * 14592 * Common destruction function for all types of planes (primary, cursor, 14593 * sprite).
14594 */ 14595 void intel_plane_destroy(struct drm_plane *plane) 14596 { 14597 drm_plane_cleanup(plane); 14598 kfree(to_intel_plane(plane)); 14599 } 14600 14601 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, 14602 u32 format, u64 modifier) 14603 { 14604 switch (modifier) { 14605 case DRM_FORMAT_MOD_LINEAR: 14606 case I915_FORMAT_MOD_X_TILED: 14607 break; 14608 default: 14609 return false; 14610 } 14611 14612 switch (format) { 14613 case DRM_FORMAT_C8: 14614 case DRM_FORMAT_RGB565: 14615 case DRM_FORMAT_XRGB1555: 14616 case DRM_FORMAT_XRGB8888: 14617 return modifier == DRM_FORMAT_MOD_LINEAR || 14618 modifier == I915_FORMAT_MOD_X_TILED; 14619 default: 14620 return false; 14621 } 14622 } 14623 14624 static bool i965_plane_format_mod_supported(struct drm_plane *_plane, 14625 u32 format, u64 modifier) 14626 { 14627 switch (modifier) { 14628 case DRM_FORMAT_MOD_LINEAR: 14629 case I915_FORMAT_MOD_X_TILED: 14630 break; 14631 default: 14632 return false; 14633 } 14634 14635 switch (format) { 14636 case DRM_FORMAT_C8: 14637 case DRM_FORMAT_RGB565: 14638 case DRM_FORMAT_XRGB8888: 14639 case DRM_FORMAT_XBGR8888: 14640 case DRM_FORMAT_XRGB2101010: 14641 case DRM_FORMAT_XBGR2101010: 14642 return modifier == DRM_FORMAT_MOD_LINEAR || 14643 modifier == I915_FORMAT_MOD_X_TILED; 14644 default: 14645 return false; 14646 } 14647 } 14648 14649 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, 14650 u32 format, u64 modifier) 14651 { 14652 return modifier == DRM_FORMAT_MOD_LINEAR && 14653 format == DRM_FORMAT_ARGB8888; 14654 } 14655 14656 static const struct drm_plane_funcs i965_plane_funcs = { 14657 .update_plane = drm_atomic_helper_update_plane, 14658 .disable_plane = drm_atomic_helper_disable_plane, 14659 .destroy = intel_plane_destroy, 14660 .atomic_duplicate_state = intel_plane_duplicate_state, 14661 .atomic_destroy_state = intel_plane_destroy_state, 14662 .format_mod_supported = i965_plane_format_mod_supported, 14663 }; 14664 14665 static const struct drm_plane_funcs i8xx_plane_funcs = { 14666 .update_plane = drm_atomic_helper_update_plane, 14667 .disable_plane = drm_atomic_helper_disable_plane, 14668 .destroy = intel_plane_destroy, 14669 .atomic_duplicate_state = intel_plane_duplicate_state, 14670 .atomic_destroy_state = intel_plane_destroy_state, 14671 .format_mod_supported = i8xx_plane_format_mod_supported, 14672 }; 14673 14674 static int 14675 intel_legacy_cursor_update(struct drm_plane *plane, 14676 struct drm_crtc *crtc, 14677 struct drm_framebuffer *fb, 14678 int crtc_x, int crtc_y, 14679 unsigned int crtc_w, unsigned int crtc_h, 14680 u32 src_x, u32 src_y, 14681 u32 src_w, u32 src_h, 14682 struct drm_modeset_acquire_ctx *ctx) 14683 { 14684 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 14685 int ret; 14686 struct drm_plane_state *old_plane_state, *new_plane_state; 14687 struct intel_plane *intel_plane = to_intel_plane(plane); 14688 struct drm_framebuffer *old_fb; 14689 struct intel_crtc_state *crtc_state = 14690 to_intel_crtc_state(crtc->state); 14691 struct intel_crtc_state *new_crtc_state; 14692 14693 /* 14694 * When crtc is inactive or there is a modeset pending, 14695 * wait for it to complete in the slowpath 14696 */ 14697 if (!crtc_state->base.active || needs_modeset(crtc_state) || 14698 crtc_state->update_pipe) 14699 goto slow; 14700 14701 old_plane_state = plane->state; 14702 /* 14703 * Don't do an async update if there is an outstanding commit modifying 14704 * the plane. 
This prevents our async update's changes from getting 14705 * overridden by a previous synchronous update's state. 14706 */ 14707 if (old_plane_state->commit && 14708 !try_wait_for_completion(&old_plane_state->commit->hw_done)) 14709 goto slow; 14710 14711 /* 14712 * If any parameters change that may affect watermarks, 14713 * take the slowpath. Only changing fb or position should be 14714 * in the fastpath. 14715 */ 14716 if (old_plane_state->crtc != crtc || 14717 old_plane_state->src_w != src_w || 14718 old_plane_state->src_h != src_h || 14719 old_plane_state->crtc_w != crtc_w || 14720 old_plane_state->crtc_h != crtc_h || 14721 !old_plane_state->fb != !fb) 14722 goto slow; 14723 14724 new_plane_state = intel_plane_duplicate_state(plane); 14725 if (!new_plane_state) 14726 return -ENOMEM; 14727 14728 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc)); 14729 if (!new_crtc_state) { 14730 ret = -ENOMEM; 14731 goto out_free; 14732 } 14733 14734 drm_atomic_set_fb_for_plane(new_plane_state, fb); 14735 14736 new_plane_state->src_x = src_x; 14737 new_plane_state->src_y = src_y; 14738 new_plane_state->src_w = src_w; 14739 new_plane_state->src_h = src_h; 14740 new_plane_state->crtc_x = crtc_x; 14741 new_plane_state->crtc_y = crtc_y; 14742 new_plane_state->crtc_w = crtc_w; 14743 new_plane_state->crtc_h = crtc_h; 14744 14745 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, 14746 to_intel_plane_state(old_plane_state), 14747 to_intel_plane_state(new_plane_state)); 14748 if (ret) 14749 goto out_free; 14750 14751 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 14752 if (ret) 14753 goto out_free; 14754 14755 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state)); 14756 if (ret) 14757 goto out_unlock; 14758 14759 intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP); 14760 14761 old_fb = old_plane_state->fb; 14762 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb), 14763 intel_plane->frontbuffer_bit); 14764 14765 /* Swap plane state */ 14766 plane->state = new_plane_state; 14767 14768 /* 14769 * We cannot swap crtc_state as it may be in use by an atomic commit or 14770 * page flip that's running simultaneously. If we swap crtc_state and 14771 * destroy the old state, we will cause a use-after-free there. 14772 * 14773 * Only update active_planes, which is needed for our internal 14774 * bookkeeping. Either value will do the right thing when updating 14775 * planes atomically. If the cursor was part of the atomic update then 14776 * we would have taken the slowpath. 
14777 */ 14778 crtc_state->active_planes = new_crtc_state->active_planes; 14779 14780 if (plane->state->visible) 14781 intel_update_plane(intel_plane, crtc_state, 14782 to_intel_plane_state(plane->state)); 14783 else 14784 intel_disable_plane(intel_plane, crtc_state); 14785 14786 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state)); 14787 14788 out_unlock: 14789 mutex_unlock(&dev_priv->drm.struct_mutex); 14790 out_free: 14791 if (new_crtc_state) 14792 intel_crtc_destroy_state(crtc, &new_crtc_state->base); 14793 if (ret) 14794 intel_plane_destroy_state(plane, new_plane_state); 14795 else 14796 intel_plane_destroy_state(plane, old_plane_state); 14797 return ret; 14798 14799 slow: 14800 return drm_atomic_helper_update_plane(plane, crtc, fb, 14801 crtc_x, crtc_y, crtc_w, crtc_h, 14802 src_x, src_y, src_w, src_h, ctx); 14803 } 14804 14805 static const struct drm_plane_funcs intel_cursor_plane_funcs = { 14806 .update_plane = intel_legacy_cursor_update, 14807 .disable_plane = drm_atomic_helper_disable_plane, 14808 .destroy = intel_plane_destroy, 14809 .atomic_duplicate_state = intel_plane_duplicate_state, 14810 .atomic_destroy_state = intel_plane_destroy_state, 14811 .format_mod_supported = intel_cursor_format_mod_supported, 14812 }; 14813 14814 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, 14815 enum i9xx_plane_id i9xx_plane) 14816 { 14817 if (!HAS_FBC(dev_priv)) 14818 return false; 14819 14820 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 14821 return i9xx_plane == PLANE_A; /* tied to pipe A */ 14822 else if (IS_IVYBRIDGE(dev_priv)) 14823 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B || 14824 i9xx_plane == PLANE_C; 14825 else if (INTEL_GEN(dev_priv) >= 4) 14826 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B; 14827 else 14828 return i9xx_plane == PLANE_A; 14829 } 14830 14831 static struct intel_plane * 14832 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) 14833 { 14834 struct intel_plane *plane; 14835 const struct drm_plane_funcs *plane_funcs; 14836 unsigned int supported_rotations; 14837 unsigned int possible_crtcs; 14838 const u64 *modifiers; 14839 const u32 *formats; 14840 int num_formats; 14841 int ret; 14842 14843 if (INTEL_GEN(dev_priv) >= 9) 14844 return skl_universal_plane_create(dev_priv, pipe, 14845 PLANE_PRIMARY); 14846 14847 plane = intel_plane_alloc(); 14848 if (IS_ERR(plane)) 14849 return plane; 14850 14851 plane->pipe = pipe; 14852 /* 14853 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 14854 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
14855 */ 14856 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) 14857 plane->i9xx_plane = (enum i9xx_plane_id) !pipe; 14858 else 14859 plane->i9xx_plane = (enum i9xx_plane_id) pipe; 14860 plane->id = PLANE_PRIMARY; 14861 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); 14862 14863 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); 14864 if (plane->has_fbc) { 14865 struct intel_fbc *fbc = &dev_priv->fbc; 14866 14867 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; 14868 } 14869 14870 if (INTEL_GEN(dev_priv) >= 4) { 14871 formats = i965_primary_formats; 14872 num_formats = ARRAY_SIZE(i965_primary_formats); 14873 modifiers = i9xx_format_modifiers; 14874 14875 plane->max_stride = i9xx_plane_max_stride; 14876 plane->update_plane = i9xx_update_plane; 14877 plane->disable_plane = i9xx_disable_plane; 14878 plane->get_hw_state = i9xx_plane_get_hw_state; 14879 plane->check_plane = i9xx_plane_check; 14880 14881 plane_funcs = &i965_plane_funcs; 14882 } else { 14883 formats = i8xx_primary_formats; 14884 num_formats = ARRAY_SIZE(i8xx_primary_formats); 14885 modifiers = i9xx_format_modifiers; 14886 14887 plane->max_stride = i9xx_plane_max_stride; 14888 plane->update_plane = i9xx_update_plane; 14889 plane->disable_plane = i9xx_disable_plane; 14890 plane->get_hw_state = i9xx_plane_get_hw_state; 14891 plane->check_plane = i9xx_plane_check; 14892 14893 plane_funcs = &i8xx_plane_funcs; 14894 } 14895 14896 possible_crtcs = BIT(pipe); 14897 14898 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 14899 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 14900 possible_crtcs, plane_funcs, 14901 formats, num_formats, modifiers, 14902 DRM_PLANE_TYPE_PRIMARY, 14903 "primary %c", pipe_name(pipe)); 14904 else 14905 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 14906 possible_crtcs, plane_funcs, 14907 formats, num_formats, modifiers, 14908 DRM_PLANE_TYPE_PRIMARY, 14909 "plane %c", 14910 plane_name(plane->i9xx_plane)); 14911 if (ret) 14912 goto fail; 14913 14914 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 14915 supported_rotations = 14916 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 14917 DRM_MODE_REFLECT_X; 14918 } else if (INTEL_GEN(dev_priv) >= 4) { 14919 supported_rotations = 14920 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 14921 } else { 14922 supported_rotations = DRM_MODE_ROTATE_0; 14923 } 14924 14925 if (INTEL_GEN(dev_priv) >= 4) 14926 drm_plane_create_rotation_property(&plane->base, 14927 DRM_MODE_ROTATE_0, 14928 supported_rotations); 14929 14930 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); 14931 14932 return plane; 14933 14934 fail: 14935 intel_plane_free(plane); 14936 14937 return ERR_PTR(ret); 14938 } 14939 14940 static struct intel_plane * 14941 intel_cursor_plane_create(struct drm_i915_private *dev_priv, 14942 enum pipe pipe) 14943 { 14944 unsigned int possible_crtcs; 14945 struct intel_plane *cursor; 14946 int ret; 14947 14948 cursor = intel_plane_alloc(); 14949 if (IS_ERR(cursor)) 14950 return cursor; 14951 14952 cursor->pipe = pipe; 14953 cursor->i9xx_plane = (enum i9xx_plane_id) pipe; 14954 cursor->id = PLANE_CURSOR; 14955 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); 14956 14957 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 14958 cursor->max_stride = i845_cursor_max_stride; 14959 cursor->update_plane = i845_update_cursor; 14960 cursor->disable_plane = i845_disable_cursor; 14961 cursor->get_hw_state = i845_cursor_get_hw_state; 14962 cursor->check_plane = i845_check_cursor; 14963 } else { 14964 
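/* Everything else uses the i9xx-style cursor hooks below (CURCNTR/CURBASE register pairs) */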
cursor->max_stride = i9xx_cursor_max_stride; 14965 cursor->update_plane = i9xx_update_cursor; 14966 cursor->disable_plane = i9xx_disable_cursor; 14967 cursor->get_hw_state = i9xx_cursor_get_hw_state; 14968 cursor->check_plane = i9xx_check_cursor; 14969 } 14970 14971 cursor->cursor.base = ~0; 14972 cursor->cursor.cntl = ~0; 14973 14974 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 14975 cursor->cursor.size = ~0; 14976 14977 possible_crtcs = BIT(pipe); 14978 14979 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 14980 possible_crtcs, &intel_cursor_plane_funcs, 14981 intel_cursor_formats, 14982 ARRAY_SIZE(intel_cursor_formats), 14983 cursor_format_modifiers, 14984 DRM_PLANE_TYPE_CURSOR, 14985 "cursor %c", pipe_name(pipe)); 14986 if (ret) 14987 goto fail; 14988 14989 if (INTEL_GEN(dev_priv) >= 4) 14990 drm_plane_create_rotation_property(&cursor->base, 14991 DRM_MODE_ROTATE_0, 14992 DRM_MODE_ROTATE_0 | 14993 DRM_MODE_ROTATE_180); 14994 14995 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 14996 14997 return cursor; 14998 14999 fail: 15000 intel_plane_free(cursor); 15001 15002 return ERR_PTR(ret); 15003 } 15004 15005 static void intel_crtc_init_scalers(struct intel_crtc *crtc, 15006 struct intel_crtc_state *crtc_state) 15007 { 15008 struct intel_crtc_scaler_state *scaler_state = 15009 &crtc_state->scaler_state; 15010 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 15011 int i; 15012 15013 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe]; 15014 if (!crtc->num_scalers) 15015 return; 15016 15017 for (i = 0; i < crtc->num_scalers; i++) { 15018 struct intel_scaler *scaler = &scaler_state->scalers[i]; 15019 15020 scaler->in_use = 0; 15021 scaler->mode = 0; 15022 } 15023 15024 scaler_state->scaler_id = -1; 15025 } 15026 15027 #define INTEL_CRTC_FUNCS \ 15028 .gamma_set = drm_atomic_helper_legacy_gamma_set, \ 15029 .set_config = drm_atomic_helper_set_config, \ 15030 .destroy = intel_crtc_destroy, \ 15031 .page_flip = drm_atomic_helper_page_flip, \ 15032 .atomic_duplicate_state = intel_crtc_duplicate_state, \ 15033 .atomic_destroy_state = intel_crtc_destroy_state, \ 15034 .set_crc_source = intel_crtc_set_crc_source, \ 15035 .verify_crc_source = intel_crtc_verify_crc_source, \ 15036 .get_crc_sources = intel_crtc_get_crc_sources 15037 15038 static const struct drm_crtc_funcs bdw_crtc_funcs = { 15039 INTEL_CRTC_FUNCS, 15040 15041 .get_vblank_counter = g4x_get_vblank_counter, 15042 .enable_vblank = bdw_enable_vblank, 15043 .disable_vblank = bdw_disable_vblank, 15044 }; 15045 15046 static const struct drm_crtc_funcs ilk_crtc_funcs = { 15047 INTEL_CRTC_FUNCS, 15048 15049 .get_vblank_counter = g4x_get_vblank_counter, 15050 .enable_vblank = ilk_enable_vblank, 15051 .disable_vblank = ilk_disable_vblank, 15052 }; 15053 15054 static const struct drm_crtc_funcs g4x_crtc_funcs = { 15055 INTEL_CRTC_FUNCS, 15056 15057 .get_vblank_counter = g4x_get_vblank_counter, 15058 .enable_vblank = i965_enable_vblank, 15059 .disable_vblank = i965_disable_vblank, 15060 }; 15061 15062 static const struct drm_crtc_funcs i965_crtc_funcs = { 15063 INTEL_CRTC_FUNCS, 15064 15065 .get_vblank_counter = i915_get_vblank_counter, 15066 .enable_vblank = i965_enable_vblank, 15067 .disable_vblank = i965_disable_vblank, 15068 }; 15069 15070 static const struct drm_crtc_funcs i945gm_crtc_funcs = { 15071 INTEL_CRTC_FUNCS, 15072 15073 .get_vblank_counter = i915_get_vblank_counter, 15074 .enable_vblank = i945gm_enable_vblank, 15075 .disable_vblank = 
i945gm_disable_vblank, 15076 }; 15077 15078 static const struct drm_crtc_funcs i915_crtc_funcs = { 15079 INTEL_CRTC_FUNCS, 15080 15081 .get_vblank_counter = i915_get_vblank_counter, 15082 .enable_vblank = i8xx_enable_vblank, 15083 .disable_vblank = i8xx_disable_vblank, 15084 }; 15085 15086 static const struct drm_crtc_funcs i8xx_crtc_funcs = { 15087 INTEL_CRTC_FUNCS, 15088 15089 /* no hw vblank counter */ 15090 .enable_vblank = i8xx_enable_vblank, 15091 .disable_vblank = i8xx_disable_vblank, 15092 }; 15093 15094 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) 15095 { 15096 const struct drm_crtc_funcs *funcs; 15097 struct intel_crtc *intel_crtc; 15098 struct intel_crtc_state *crtc_state = NULL; 15099 struct intel_plane *primary = NULL; 15100 struct intel_plane *cursor = NULL; 15101 int sprite, ret; 15102 15103 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); 15104 if (!intel_crtc) 15105 return -ENOMEM; 15106 15107 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 15108 if (!crtc_state) { 15109 ret = -ENOMEM; 15110 goto fail; 15111 } 15112 __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base); 15113 intel_crtc->config = crtc_state; 15114 15115 primary = intel_primary_plane_create(dev_priv, pipe); 15116 if (IS_ERR(primary)) { 15117 ret = PTR_ERR(primary); 15118 goto fail; 15119 } 15120 intel_crtc->plane_ids_mask |= BIT(primary->id); 15121 15122 for_each_sprite(dev_priv, pipe, sprite) { 15123 struct intel_plane *plane; 15124 15125 plane = intel_sprite_plane_create(dev_priv, pipe, sprite); 15126 if (IS_ERR(plane)) { 15127 ret = PTR_ERR(plane); 15128 goto fail; 15129 } 15130 intel_crtc->plane_ids_mask |= BIT(plane->id); 15131 } 15132 15133 cursor = intel_cursor_plane_create(dev_priv, pipe); 15134 if (IS_ERR(cursor)) { 15135 ret = PTR_ERR(cursor); 15136 goto fail; 15137 } 15138 intel_crtc->plane_ids_mask |= BIT(cursor->id); 15139 15140 if (HAS_GMCH(dev_priv)) { 15141 if (IS_CHERRYVIEW(dev_priv) || 15142 IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv)) 15143 funcs = &g4x_crtc_funcs; 15144 else if (IS_GEN(dev_priv, 4)) 15145 funcs = &i965_crtc_funcs; 15146 else if (IS_I945GM(dev_priv)) 15147 funcs = &i945gm_crtc_funcs; 15148 else if (IS_GEN(dev_priv, 3)) 15149 funcs = &i915_crtc_funcs; 15150 else 15151 funcs = &i8xx_crtc_funcs; 15152 } else { 15153 if (INTEL_GEN(dev_priv) >= 8) 15154 funcs = &bdw_crtc_funcs; 15155 else 15156 funcs = &ilk_crtc_funcs; 15157 } 15158 15159 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, 15160 &primary->base, &cursor->base, 15161 funcs, "pipe %c", pipe_name(pipe)); 15162 if (ret) 15163 goto fail; 15164 15165 intel_crtc->pipe = pipe; 15166 15167 /* initialize shared scalers */ 15168 intel_crtc_init_scalers(intel_crtc, crtc_state); 15169 15170 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) || 15171 dev_priv->pipe_to_crtc_mapping[pipe] != NULL); 15172 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc; 15173 15174 if (INTEL_GEN(dev_priv) < 9) { 15175 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane; 15176 15177 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 15178 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL); 15179 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc; 15180 } 15181 15182 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 15183 15184 intel_color_init(intel_crtc); 15185 15186 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 15187 15188 return 0; 15189 15190 fail: 15191 /* 15192 * drm_mode_config_cleanup() will free up 
any 15193 * crtcs/planes already initialized. 15194 */ 15195 kfree(crtc_state); 15196 kfree(intel_crtc); 15197 15198 return ret; 15199 } 15200 15201 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 15202 struct drm_file *file) 15203 { 15204 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 15205 struct drm_crtc *drmmode_crtc; 15206 struct intel_crtc *crtc; 15207 15208 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); 15209 if (!drmmode_crtc) 15210 return -ENOENT; 15211 15212 crtc = to_intel_crtc(drmmode_crtc); 15213 pipe_from_crtc_id->pipe = crtc->pipe; 15214 15215 return 0; 15216 } 15217 15218 static int intel_encoder_clones(struct intel_encoder *encoder) 15219 { 15220 struct drm_device *dev = encoder->base.dev; 15221 struct intel_encoder *source_encoder; 15222 int index_mask = 0; 15223 int entry = 0; 15224 15225 for_each_intel_encoder(dev, source_encoder) { 15226 if (encoders_cloneable(encoder, source_encoder)) 15227 index_mask |= (1 << entry); 15228 15229 entry++; 15230 } 15231 15232 return index_mask; 15233 } 15234 15235 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 15236 { 15237 if (!IS_MOBILE(dev_priv)) 15238 return false; 15239 15240 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 15241 return false; 15242 15243 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 15244 return false; 15245 15246 return true; 15247 } 15248 15249 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 15250 { 15251 if (INTEL_GEN(dev_priv) >= 9) 15252 return false; 15253 15254 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 15255 return false; 15256 15257 if (HAS_PCH_LPT_H(dev_priv) && 15258 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 15259 return false; 15260 15261 /* DDI E can't be used if DDI A requires 4 lanes */ 15262 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 15263 return false; 15264 15265 if (!dev_priv->vbt.int_crt_support) 15266 return false; 15267 15268 return true; 15269 } 15270 15271 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) 15272 { 15273 int pps_num; 15274 int pps_idx; 15275 15276 if (HAS_DDI(dev_priv)) 15277 return; 15278 /* 15279 * This w/a is needed at least on CPT/PPT, but to be sure apply it 15280 * everywhere registers can be write protected.
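* The loop below rewrites the unlock key (PANEL_UNLOCK_REGS) into the PANEL_UNLOCK_MASK bits of each PP_CONTROL instance.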
15281 */ 15282 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15283 pps_num = 2; 15284 else 15285 pps_num = 1; 15286 15287 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { 15288 u32 val = I915_READ(PP_CONTROL(pps_idx)); 15289 15290 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; 15291 I915_WRITE(PP_CONTROL(pps_idx), val); 15292 } 15293 } 15294 15295 static void intel_pps_init(struct drm_i915_private *dev_priv) 15296 { 15297 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv)) 15298 dev_priv->pps_mmio_base = PCH_PPS_BASE; 15299 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15300 dev_priv->pps_mmio_base = VLV_PPS_BASE; 15301 else 15302 dev_priv->pps_mmio_base = PPS_BASE; 15303 15304 intel_pps_unlock_regs_wa(dev_priv); 15305 } 15306 15307 static void intel_setup_outputs(struct drm_i915_private *dev_priv) 15308 { 15309 struct intel_encoder *encoder; 15310 bool dpd_is_edp = false; 15311 15312 intel_pps_init(dev_priv); 15313 15314 if (!HAS_DISPLAY(dev_priv)) 15315 return; 15316 15317 if (INTEL_GEN(dev_priv) >= 12) { 15318 /* TODO: initialize TC ports as well */ 15319 intel_ddi_init(dev_priv, PORT_A); 15320 intel_ddi_init(dev_priv, PORT_B); 15321 intel_ddi_init(dev_priv, PORT_C); 15322 } else if (IS_ELKHARTLAKE(dev_priv)) { 15323 intel_ddi_init(dev_priv, PORT_A); 15324 intel_ddi_init(dev_priv, PORT_B); 15325 intel_ddi_init(dev_priv, PORT_C); 15326 intel_ddi_init(dev_priv, PORT_D); 15327 icl_dsi_init(dev_priv); 15328 } else if (IS_GEN(dev_priv, 11)) { 15329 intel_ddi_init(dev_priv, PORT_A); 15330 intel_ddi_init(dev_priv, PORT_B); 15331 intel_ddi_init(dev_priv, PORT_C); 15332 intel_ddi_init(dev_priv, PORT_D); 15333 intel_ddi_init(dev_priv, PORT_E); 15334 /* 15335 * On some ICL SKUs port F is not present. No strap bits for 15336 * this, so rely on VBT. 15337 * Work around broken VBTs on SKUs known to have no port F. 15338 */ 15339 if (IS_ICL_WITH_PORT_F(dev_priv) && 15340 intel_bios_is_port_present(dev_priv, PORT_F)) 15341 intel_ddi_init(dev_priv, PORT_F); 15342 15343 icl_dsi_init(dev_priv); 15344 } else if (IS_GEN9_LP(dev_priv)) { 15345 /* 15346 * FIXME: Broxton doesn't support port detection via the 15347 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 15348 * detect the ports. 15349 */ 15350 intel_ddi_init(dev_priv, PORT_A); 15351 intel_ddi_init(dev_priv, PORT_B); 15352 intel_ddi_init(dev_priv, PORT_C); 15353 15354 vlv_dsi_init(dev_priv); 15355 } else if (HAS_DDI(dev_priv)) { 15356 int found; 15357 15358 if (intel_ddi_crt_present(dev_priv)) 15359 intel_crt_init(dev_priv); 15360 15361 /* 15362 * Haswell uses DDI functions to detect digital outputs. 15363 * On SKL pre-D0 the strap isn't connected, so we assume 15364 * it's there. 15365 */ 15366 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 15367 /* WaIgnoreDDIAStrap: skl */ 15368 if (found || IS_GEN9_BC(dev_priv)) 15369 intel_ddi_init(dev_priv, PORT_A); 15370 15371 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP 15372 * register */ 15373 found = I915_READ(SFUSE_STRAP); 15374 15375 if (found & SFUSE_STRAP_DDIB_DETECTED) 15376 intel_ddi_init(dev_priv, PORT_B); 15377 if (found & SFUSE_STRAP_DDIC_DETECTED) 15378 intel_ddi_init(dev_priv, PORT_C); 15379 if (found & SFUSE_STRAP_DDID_DETECTED) 15380 intel_ddi_init(dev_priv, PORT_D); 15381 if (found & SFUSE_STRAP_DDIF_DETECTED) 15382 intel_ddi_init(dev_priv, PORT_F); 15383 /* 15384 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 
15385 */ 15386 if (IS_GEN9_BC(dev_priv) && 15387 intel_bios_is_port_present(dev_priv, PORT_E)) 15388 intel_ddi_init(dev_priv, PORT_E); 15389 15390 } else if (HAS_PCH_SPLIT(dev_priv)) { 15391 int found; 15392 15393 /* 15394 * intel_edp_init_connector() depends on this completing first, 15395 * to prevent the registration of both eDP and LVDS and the 15396 * incorrect sharing of the PPS. 15397 */ 15398 intel_lvds_init(dev_priv); 15399 intel_crt_init(dev_priv); 15400 15401 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); 15402 15403 if (ilk_has_edp_a(dev_priv)) 15404 intel_dp_init(dev_priv, DP_A, PORT_A); 15405 15406 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 15407 /* PCH SDVOB is multiplexed with HDMIB */ 15408 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 15409 if (!found) 15410 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 15411 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 15412 intel_dp_init(dev_priv, PCH_DP_B, PORT_B); 15413 } 15414 15415 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 15416 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 15417 15418 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 15419 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 15420 15421 if (I915_READ(PCH_DP_C) & DP_DETECTED) 15422 intel_dp_init(dev_priv, PCH_DP_C, PORT_C); 15423 15424 if (I915_READ(PCH_DP_D) & DP_DETECTED) 15425 intel_dp_init(dev_priv, PCH_DP_D, PORT_D); 15426 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15427 bool has_edp, has_port; 15428 15429 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support) 15430 intel_crt_init(dev_priv); 15431 15432 /* 15433 * The DP_DETECTED bit is the latched state of the DDC 15434 * SDA pin at boot. However since eDP doesn't require DDC 15435 * (no way to plug in a DP->HDMI dongle) the DDC pins for 15436 * eDP ports may have been muxed to an alternate function. 15437 * Thus we can't rely on the DP_DETECTED bit alone to detect 15438 * eDP ports. Consult the VBT as well as DP_DETECTED to 15439 * detect eDP ports. 15440 * 15441 * Sadly the straps seem to be missing sometimes even for HDMI 15442 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap 15443 * and VBT for the presence of the port. Additionally we can't 15444 * trust the port type the VBT declares as we've seen at least 15445 * HDMI ports that the VBT claims are DP or eDP.
15446 */ 15447 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 15448 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 15449 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) 15450 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); 15451 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 15452 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 15453 15454 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 15455 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 15456 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) 15457 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); 15458 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 15459 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 15460 15461 if (IS_CHERRYVIEW(dev_priv)) { 15462 /* 15463 * eDP not supported on port D, 15464 * so no need to worry about it 15465 */ 15466 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 15467 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) 15468 intel_dp_init(dev_priv, CHV_DP_D, PORT_D); 15469 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) 15470 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 15471 } 15472 15473 vlv_dsi_init(dev_priv); 15474 } else if (IS_PINEVIEW(dev_priv)) { 15475 intel_lvds_init(dev_priv); 15476 intel_crt_init(dev_priv); 15477 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { 15478 bool found = false; 15479 15480 if (IS_MOBILE(dev_priv)) 15481 intel_lvds_init(dev_priv); 15482 15483 intel_crt_init(dev_priv); 15484 15485 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 15486 DRM_DEBUG_KMS("probing SDVOB\n"); 15487 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 15488 if (!found && IS_G4X(dev_priv)) { 15489 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 15490 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 15491 } 15492 15493 if (!found && IS_G4X(dev_priv)) 15494 intel_dp_init(dev_priv, DP_B, PORT_B); 15495 } 15496 15497 /* Before G4X SDVOC doesn't have its own detect register */ 15498 15499 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 15500 DRM_DEBUG_KMS("probing SDVOC\n"); 15501 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 15502 } 15503 15504 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 15505 15506 if (IS_G4X(dev_priv)) { 15507 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 15508 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 15509 } 15510 if (IS_G4X(dev_priv)) 15511 intel_dp_init(dev_priv, DP_C, PORT_C); 15512 } 15513 15514 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) 15515 intel_dp_init(dev_priv, DP_D, PORT_D); 15516 15517 if (SUPPORTS_TV(dev_priv)) 15518 intel_tv_init(dev_priv); 15519 } else if (IS_GEN(dev_priv, 2)) { 15520 if (IS_I85X(dev_priv)) 15521 intel_lvds_init(dev_priv); 15522 15523 intel_crt_init(dev_priv); 15524 intel_dvo_init(dev_priv); 15525 } 15526 15527 intel_psr_init(dev_priv); 15528 15529 for_each_intel_encoder(&dev_priv->drm, encoder) { 15530 encoder->base.possible_crtcs = encoder->crtc_mask; 15531 encoder->base.possible_clones = 15532 intel_encoder_clones(encoder); 15533 } 15534 15535 intel_init_pch_refclk(dev_priv); 15536 15537 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 15538 } 15539 15540 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 15541 { 15542 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 15543 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15544 15545 drm_framebuffer_cleanup(fb); 15546 15547 i915_gem_object_lock(obj); 15548 WARN_ON(!obj->framebuffer_references--); 15549 i915_gem_object_unlock(obj); 15550 15551 
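/* Drop the reference the framebuffer has been holding on the GEM object */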
i915_gem_object_put(obj); 15552 15553 kfree(intel_fb); 15554 } 15555 15556 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 15557 struct drm_file *file, 15558 unsigned int *handle) 15559 { 15560 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15561 15562 if (obj->userptr.mm) { 15563 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); 15564 return -EINVAL; 15565 } 15566 15567 return drm_gem_handle_create(file, &obj->base, handle); 15568 } 15569 15570 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, 15571 struct drm_file *file, 15572 unsigned flags, unsigned color, 15573 struct drm_clip_rect *clips, 15574 unsigned num_clips) 15575 { 15576 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15577 15578 i915_gem_object_flush_if_display(obj); 15579 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); 15580 15581 return 0; 15582 } 15583 15584 static const struct drm_framebuffer_funcs intel_fb_funcs = { 15585 .destroy = intel_user_framebuffer_destroy, 15586 .create_handle = intel_user_framebuffer_create_handle, 15587 .dirty = intel_user_framebuffer_dirty, 15588 }; 15589 15590 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, 15591 struct drm_i915_gem_object *obj, 15592 struct drm_mode_fb_cmd2 *mode_cmd) 15593 { 15594 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 15595 struct drm_framebuffer *fb = &intel_fb->base; 15596 u32 max_stride; 15597 unsigned int tiling, stride; 15598 int ret = -EINVAL; 15599 int i; 15600 15601 i915_gem_object_lock(obj); 15602 obj->framebuffer_references++; 15603 tiling = i915_gem_object_get_tiling(obj); 15604 stride = i915_gem_object_get_stride(obj); 15605 i915_gem_object_unlock(obj); 15606 15607 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 15608 /* 15609 * If there's a fence, enforce that 15610 * the fb modifier and tiling mode match. 15611 */ 15612 if (tiling != I915_TILING_NONE && 15613 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 15614 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n"); 15615 goto err; 15616 } 15617 } else { 15618 if (tiling == I915_TILING_X) { 15619 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 15620 } else if (tiling == I915_TILING_Y) { 15621 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n"); 15622 goto err; 15623 } 15624 } 15625 15626 if (!drm_any_plane_has_format(&dev_priv->drm, 15627 mode_cmd->pixel_format, 15628 mode_cmd->modifier[0])) { 15629 struct drm_format_name_buf format_name; 15630 15631 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n", 15632 drm_get_format_name(mode_cmd->pixel_format, 15633 &format_name), 15634 mode_cmd->modifier[0]); 15635 goto err; 15636 } 15637 15638 /* 15639 * gen2/3 display engine uses the fence if present, 15640 * so the tiling mode must match the fb modifier exactly. 15641 */ 15642 if (INTEL_GEN(dev_priv) < 4 && 15643 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 15644 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n"); 15645 goto err; 15646 } 15647 15648 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format, 15649 mode_cmd->modifier[0]); 15650 if (mode_cmd->pitches[0] > max_stride) { 15651 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", 15652 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 15653 "tiled" : "linear", 15654 mode_cmd->pitches[0], max_stride); 15655 goto err; 15656 } 15657 15658 /* 15659 * If there's a fence, enforce that 15660 * the fb pitch and fence stride match. 
15661 */ 15662 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { 15663 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n", 15664 mode_cmd->pitches[0], stride); 15665 goto err; 15666 } 15667 15668 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 15669 if (mode_cmd->offsets[0] != 0) 15670 goto err; 15671 15672 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); 15673 15674 for (i = 0; i < fb->format->num_planes; i++) { 15675 u32 stride_alignment; 15676 15677 if (mode_cmd->handles[i] != mode_cmd->handles[0]) { 15678 DRM_DEBUG_KMS("bad plane %d handle\n", i); 15679 goto err; 15680 } 15681 15682 stride_alignment = intel_fb_stride_alignment(fb, i); 15683 15684 /* 15685 * Display WA #0531: skl,bxt,kbl,glk 15686 * 15687 * Render decompression and plane width > 3840 15688 * combined with horizontal panning requires the 15689 * plane stride to be a multiple of 4. We'll just 15690 * require the entire fb to accommodate that to avoid 15691 * potential runtime errors at plane configuration time. 15692 */ 15693 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 && 15694 is_ccs_modifier(fb->modifier)) 15695 stride_alignment *= 4; 15696 15697 if (fb->pitches[i] & (stride_alignment - 1)) { 15698 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n", 15699 i, fb->pitches[i], stride_alignment); 15700 goto err; 15701 } 15702 15703 fb->obj[i] = &obj->base; 15704 } 15705 15706 ret = intel_fill_fb_info(dev_priv, fb); 15707 if (ret) 15708 goto err; 15709 15710 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); 15711 if (ret) { 15712 DRM_ERROR("framebuffer init failed %d\n", ret); 15713 goto err; 15714 } 15715 15716 return 0; 15717 15718 err: 15719 i915_gem_object_lock(obj); 15720 obj->framebuffer_references--; 15721 i915_gem_object_unlock(obj); 15722 return ret; 15723 } 15724 15725 static struct drm_framebuffer * 15726 intel_user_framebuffer_create(struct drm_device *dev, 15727 struct drm_file *filp, 15728 const struct drm_mode_fb_cmd2 *user_mode_cmd) 15729 { 15730 struct drm_framebuffer *fb; 15731 struct drm_i915_gem_object *obj; 15732 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 15733 15734 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); 15735 if (!obj) 15736 return ERR_PTR(-ENOENT); 15737 15738 fb = intel_framebuffer_create(obj, &mode_cmd); 15739 if (IS_ERR(fb)) 15740 i915_gem_object_put(obj); 15741 15742 return fb; 15743 } 15744 15745 static void intel_atomic_state_free(struct drm_atomic_state *state) 15746 { 15747 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 15748 15749 drm_atomic_state_default_release(state); 15750 15751 i915_sw_fence_fini(&intel_state->commit_ready); 15752 15753 kfree(state); 15754 } 15755 15756 static enum drm_mode_status 15757 intel_mode_valid(struct drm_device *dev, 15758 const struct drm_display_mode *mode) 15759 { 15760 struct drm_i915_private *dev_priv = to_i915(dev); 15761 int hdisplay_max, htotal_max; 15762 int vdisplay_max, vtotal_max; 15763 15764 /* 15765 * Can't reject DBLSCAN here because Xorg ddxen can add piles 15766 * of DBLSCAN modes to the output's mode list when they detect 15767 * the scaling mode property on the connector. And they don't 15768 * ask the kernel to validate those modes in any way until 15769 * modeset time at which point the client gets a protocol error. 15770 * So in order to not upset those clients we silently ignore the 15771 * DBLSCAN flag on such connectors. 
For other connectors we will 15772 * reject modes with the DBLSCAN flag in encoder->compute_config(). 15773 * And we always reject DBLSCAN modes in connector->mode_valid() 15774 * as we never want such modes on the connector's mode list. 15775 */ 15776 15777 if (mode->vscan > 1) 15778 return MODE_NO_VSCAN; 15779 15780 if (mode->flags & DRM_MODE_FLAG_HSKEW) 15781 return MODE_H_ILLEGAL; 15782 15783 if (mode->flags & (DRM_MODE_FLAG_CSYNC | 15784 DRM_MODE_FLAG_NCSYNC | 15785 DRM_MODE_FLAG_PCSYNC)) 15786 return MODE_HSYNC; 15787 15788 if (mode->flags & (DRM_MODE_FLAG_BCAST | 15789 DRM_MODE_FLAG_PIXMUX | 15790 DRM_MODE_FLAG_CLKDIV2)) 15791 return MODE_BAD; 15792 15793 if (INTEL_GEN(dev_priv) >= 9 || 15794 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 15795 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ 15796 vdisplay_max = 4096; 15797 htotal_max = 8192; 15798 vtotal_max = 8192; 15799 } else if (INTEL_GEN(dev_priv) >= 3) { 15800 hdisplay_max = 4096; 15801 vdisplay_max = 4096; 15802 htotal_max = 8192; 15803 vtotal_max = 8192; 15804 } else { 15805 hdisplay_max = 2048; 15806 vdisplay_max = 2048; 15807 htotal_max = 4096; 15808 vtotal_max = 4096; 15809 } 15810 15811 if (mode->hdisplay > hdisplay_max || 15812 mode->hsync_start > htotal_max || 15813 mode->hsync_end > htotal_max || 15814 mode->htotal > htotal_max) 15815 return MODE_H_ILLEGAL; 15816 15817 if (mode->vdisplay > vdisplay_max || 15818 mode->vsync_start > vtotal_max || 15819 mode->vsync_end > vtotal_max || 15820 mode->vtotal > vtotal_max) 15821 return MODE_V_ILLEGAL; 15822 15823 return MODE_OK; 15824 } 15825 15826 static const struct drm_mode_config_funcs intel_mode_funcs = { 15827 .fb_create = intel_user_framebuffer_create, 15828 .get_format_info = intel_get_format_info, 15829 .output_poll_changed = intel_fbdev_output_poll_changed, 15830 .mode_valid = intel_mode_valid, 15831 .atomic_check = intel_atomic_check, 15832 .atomic_commit = intel_atomic_commit, 15833 .atomic_state_alloc = intel_atomic_state_alloc, 15834 .atomic_state_clear = intel_atomic_state_clear, 15835 .atomic_state_free = intel_atomic_state_free, 15836 }; 15837 15838 /** 15839 * intel_init_display_hooks - initialize the display modesetting hooks 15840 * @dev_priv: device private 15841 */ 15842 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 15843 { 15844 intel_init_cdclk_hooks(dev_priv); 15845 15846 if (INTEL_GEN(dev_priv) >= 9) { 15847 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 15848 dev_priv->display.get_initial_plane_config = 15849 skylake_get_initial_plane_config; 15850 dev_priv->display.crtc_compute_clock = 15851 haswell_crtc_compute_clock; 15852 dev_priv->display.crtc_enable = haswell_crtc_enable; 15853 dev_priv->display.crtc_disable = haswell_crtc_disable; 15854 } else if (HAS_DDI(dev_priv)) { 15855 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 15856 dev_priv->display.get_initial_plane_config = 15857 i9xx_get_initial_plane_config; 15858 dev_priv->display.crtc_compute_clock = 15859 haswell_crtc_compute_clock; 15860 dev_priv->display.crtc_enable = haswell_crtc_enable; 15861 dev_priv->display.crtc_disable = haswell_crtc_disable; 15862 } else if (HAS_PCH_SPLIT(dev_priv)) { 15863 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 15864 dev_priv->display.get_initial_plane_config = 15865 i9xx_get_initial_plane_config; 15866 dev_priv->display.crtc_compute_clock = 15867 ironlake_crtc_compute_clock; 15868 dev_priv->display.crtc_enable = ironlake_crtc_enable; 15869 dev_priv->display.crtc_disable = 
ironlake_crtc_disable; 15870 } else if (IS_CHERRYVIEW(dev_priv)) { 15871 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15872 dev_priv->display.get_initial_plane_config = 15873 i9xx_get_initial_plane_config; 15874 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; 15875 dev_priv->display.crtc_enable = valleyview_crtc_enable; 15876 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15877 } else if (IS_VALLEYVIEW(dev_priv)) { 15878 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15879 dev_priv->display.get_initial_plane_config = 15880 i9xx_get_initial_plane_config; 15881 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; 15882 dev_priv->display.crtc_enable = valleyview_crtc_enable; 15883 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15884 } else if (IS_G4X(dev_priv)) { 15885 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15886 dev_priv->display.get_initial_plane_config = 15887 i9xx_get_initial_plane_config; 15888 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; 15889 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15890 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15891 } else if (IS_PINEVIEW(dev_priv)) { 15892 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15893 dev_priv->display.get_initial_plane_config = 15894 i9xx_get_initial_plane_config; 15895 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 15896 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15897 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15898 } else if (!IS_GEN(dev_priv, 2)) { 15899 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15900 dev_priv->display.get_initial_plane_config = 15901 i9xx_get_initial_plane_config; 15902 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 15903 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15904 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15905 } else { 15906 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15907 dev_priv->display.get_initial_plane_config = 15908 i9xx_get_initial_plane_config; 15909 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; 15910 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15911 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15912 } 15913 15914 if (IS_GEN(dev_priv, 5)) { 15915 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 15916 } else if (IS_GEN(dev_priv, 6)) { 15917 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 15918 } else if (IS_IVYBRIDGE(dev_priv)) { 15919 /* FIXME: detect B0+ stepping and use auto training */ 15920 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 15921 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 15922 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 15923 } 15924 15925 if (INTEL_GEN(dev_priv) >= 9) 15926 dev_priv->display.update_crtcs = skl_update_crtcs; 15927 else 15928 dev_priv->display.update_crtcs = intel_update_crtcs; 15929 } 15930 15931 static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv) 15932 { 15933 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15934 return VLV_VGACNTRL; 15935 else if (INTEL_GEN(dev_priv) >= 5) 15936 return CPU_VGACNTRL; 15937 else 15938 return VGACNTRL; 15939 } 15940 15941 /* Disable the VGA plane that we never use */ 15942 static void i915_disable_vga(struct drm_i915_private *dev_priv) 15943 { 15944 struct pci_dev *pdev = dev_priv->drm.pdev; 15945 u8 sr1; 15946 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); 15947 15948 /* 
WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ 15949 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); 15950 outb(SR01, VGA_SR_INDEX); 15951 sr1 = inb(VGA_SR_DATA); 15952 outb(sr1 | 1<<5, VGA_SR_DATA); 15953 vga_put(pdev, VGA_RSRC_LEGACY_IO); 15954 udelay(300); 15955 15956 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 15957 POSTING_READ(vga_reg); 15958 } 15959 15960 void intel_modeset_init_hw(struct drm_device *dev) 15961 { 15962 struct drm_i915_private *dev_priv = to_i915(dev); 15963 15964 intel_update_cdclk(dev_priv); 15965 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); 15966 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw; 15967 } 15968 15969 /* 15970 * Calculate what we think the watermarks should be for the state we've read 15971 * out of the hardware and then immediately program those watermarks so that 15972 * we ensure the hardware settings match our internal state. 15973 * 15974 * We can calculate what we think WM's should be by creating a duplicate of the 15975 * current state (which was constructed during hardware readout) and running it 15976 * through the atomic check code to calculate new watermark values in the 15977 * state object. 15978 */ 15979 static void sanitize_watermarks(struct drm_device *dev) 15980 { 15981 struct drm_i915_private *dev_priv = to_i915(dev); 15982 struct drm_atomic_state *state; 15983 struct intel_atomic_state *intel_state; 15984 struct intel_crtc *crtc; 15985 struct intel_crtc_state *crtc_state; 15986 struct drm_modeset_acquire_ctx ctx; 15987 int ret; 15988 int i; 15989 15990 /* Only supported on platforms that use atomic watermark design */ 15991 if (!dev_priv->display.optimize_watermarks) 15992 return; 15993 15994 /* 15995 * We need to hold connection_mutex before calling duplicate_state so 15996 * that the connector loop is protected. 15997 */ 15998 drm_modeset_acquire_init(&ctx, 0); 15999 retry: 16000 ret = drm_modeset_lock_all_ctx(dev, &ctx); 16001 if (ret == -EDEADLK) { 16002 drm_modeset_backoff(&ctx); 16003 goto retry; 16004 } else if (WARN_ON(ret)) { 16005 goto fail; 16006 } 16007 16008 state = drm_atomic_helper_duplicate_state(dev, &ctx); 16009 if (WARN_ON(IS_ERR(state))) 16010 goto fail; 16011 16012 intel_state = to_intel_atomic_state(state); 16013 16014 /* 16015 * Hardware readout is the only time we don't want to calculate 16016 * intermediate watermarks (since we don't trust the current 16017 * watermarks). 16018 */ 16019 if (!HAS_GMCH(dev_priv)) 16020 intel_state->skip_intermediate_wm = true; 16021 16022 ret = intel_atomic_check(dev, state); 16023 if (ret) { 16024 /* 16025 * If we fail here, it means that the hardware appears to be 16026 * programmed in a way that shouldn't be possible, given our 16027 * understanding of watermark requirements. This might mean a 16028 * mistake in the hardware readout code or a mistake in the 16029 * watermark calculations for a given platform. Raise a WARN 16030 * so that this is noticeable. 16031 * 16032 * If this actually happens, we'll have to just leave the 16033 * BIOS-programmed watermarks untouched and hope for the best. 
16034 */ 16035 WARN(true, "Could not determine valid watermarks for inherited state\n"); 16036 goto put_state; 16037 } 16038 16039 /* Write calculated watermark values back */ 16040 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { 16041 crtc_state->wm.need_postvbl_update = true; 16042 dev_priv->display.optimize_watermarks(intel_state, crtc_state); 16043 16044 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; 16045 } 16046 16047 put_state: 16048 drm_atomic_state_put(state); 16049 fail: 16050 drm_modeset_drop_locks(&ctx); 16051 drm_modeset_acquire_fini(&ctx); 16052 } 16053 16054 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) 16055 { 16056 if (IS_GEN(dev_priv, 5)) { 16057 u32 fdi_pll_clk = 16058 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; 16059 16060 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; 16061 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) { 16062 dev_priv->fdi_pll_freq = 270000; 16063 } else { 16064 return; 16065 } 16066 16067 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); 16068 } 16069 16070 static int intel_initial_commit(struct drm_device *dev) 16071 { 16072 struct drm_atomic_state *state = NULL; 16073 struct drm_modeset_acquire_ctx ctx; 16074 struct drm_crtc *crtc; 16075 struct drm_crtc_state *crtc_state; 16076 int ret = 0; 16077 16078 state = drm_atomic_state_alloc(dev); 16079 if (!state) 16080 return -ENOMEM; 16081 16082 drm_modeset_acquire_init(&ctx, 0); 16083 16084 retry: 16085 state->acquire_ctx = &ctx; 16086 16087 drm_for_each_crtc(crtc, dev) { 16088 crtc_state = drm_atomic_get_crtc_state(state, crtc); 16089 if (IS_ERR(crtc_state)) { 16090 ret = PTR_ERR(crtc_state); 16091 goto out; 16092 } 16093 16094 if (crtc_state->active) { 16095 ret = drm_atomic_add_affected_planes(state, crtc); 16096 if (ret) 16097 goto out; 16098 16099 /* 16100 * FIXME hack to force a LUT update to avoid the 16101 * plane update forcing the pipe gamma on without 16102 * having a proper LUT loaded. Remove once we 16103 * have readout for pipe gamma enable. 
16104 */ 16105 crtc_state->color_mgmt_changed = true; 16106 } 16107 } 16108 16109 ret = drm_atomic_commit(state); 16110 16111 out: 16112 if (ret == -EDEADLK) { 16113 drm_atomic_state_clear(state); 16114 drm_modeset_backoff(&ctx); 16115 goto retry; 16116 } 16117 16118 drm_atomic_state_put(state); 16119 16120 drm_modeset_drop_locks(&ctx); 16121 drm_modeset_acquire_fini(&ctx); 16122 16123 return ret; 16124 } 16125 16126 int intel_modeset_init(struct drm_device *dev) 16127 { 16128 struct drm_i915_private *dev_priv = to_i915(dev); 16129 struct i915_ggtt *ggtt = &dev_priv->ggtt; 16130 enum pipe pipe; 16131 struct intel_crtc *crtc; 16132 int ret; 16133 16134 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); 16135 16136 drm_mode_config_init(dev); 16137 16138 ret = intel_bw_init(dev_priv); 16139 if (ret) 16140 return ret; 16141 16142 dev->mode_config.min_width = 0; 16143 dev->mode_config.min_height = 0; 16144 16145 dev->mode_config.preferred_depth = 24; 16146 dev->mode_config.prefer_shadow = 1; 16147 16148 dev->mode_config.allow_fb_modifiers = true; 16149 16150 dev->mode_config.funcs = &intel_mode_funcs; 16151 16152 init_llist_head(&dev_priv->atomic_helper.free_list); 16153 INIT_WORK(&dev_priv->atomic_helper.free_work, 16154 intel_atomic_helper_free_state_worker); 16155 16156 intel_init_quirks(dev_priv); 16157 16158 intel_fbc_init(dev_priv); 16159 16160 intel_init_pm(dev_priv); 16161 16162 /* 16163 * There may be no VBT; and if the BIOS enabled SSC we can 16164 * just keep using it to avoid unnecessary flicker. Whereas if the 16165 * BIOS isn't using it, don't assume it will work even if the VBT 16166 * indicates as much. 16167 */ 16168 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 16169 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & 16170 DREF_SSC1_ENABLE); 16171 16172 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 16173 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", 16174 bios_lvds_use_ssc ? "en" : "dis", 16175 dev_priv->vbt.lvds_use_ssc ? "en" : "dis"); 16176 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 16177 } 16178 } 16179 16180 /* 16181 * Maximum framebuffer dimensions, chosen to match 16182 * the maximum render engine surface size on gen4+. 16183 */ 16184 if (INTEL_GEN(dev_priv) >= 7) { 16185 dev->mode_config.max_width = 16384; 16186 dev->mode_config.max_height = 16384; 16187 } else if (INTEL_GEN(dev_priv) >= 4) { 16188 dev->mode_config.max_width = 8192; 16189 dev->mode_config.max_height = 8192; 16190 } else if (IS_GEN(dev_priv, 3)) { 16191 dev->mode_config.max_width = 4096; 16192 dev->mode_config.max_height = 4096; 16193 } else { 16194 dev->mode_config.max_width = 2048; 16195 dev->mode_config.max_height = 2048; 16196 } 16197 16198 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 16199 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512; 16200 dev->mode_config.cursor_height = 1023; 16201 } else if (IS_GEN(dev_priv, 2)) { 16202 dev->mode_config.cursor_width = 64; 16203 dev->mode_config.cursor_height = 64; 16204 } else { 16205 dev->mode_config.cursor_width = 256; 16206 dev->mode_config.cursor_height = 256; 16207 } 16208 16209 dev->mode_config.fb_base = ggtt->gmadr.start; 16210 16211 DRM_DEBUG_KMS("%d display pipe%s available.\n", 16212 INTEL_INFO(dev_priv)->num_pipes, 16213 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : ""); 16214 16215 for_each_pipe(dev_priv, pipe) { 16216 ret = intel_crtc_init(dev_priv, pipe); 16217 if (ret) { 16218 drm_mode_config_cleanup(dev); 16219 return ret; 16220 } 16221 } 16222 16223 intel_shared_dpll_init(dev); 16224 intel_update_fdi_pll_freq(dev_priv); 16225 16226 intel_update_czclk(dev_priv); 16227 intel_modeset_init_hw(dev); 16228 16229 intel_hdcp_component_init(dev_priv); 16230 16231 if (dev_priv->max_cdclk_freq == 0) 16232 intel_update_max_cdclk(dev_priv); 16233 16234 /* Just disable it once at startup */ 16235 i915_disable_vga(dev_priv); 16236 intel_setup_outputs(dev_priv); 16237 16238 drm_modeset_lock_all(dev); 16239 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 16240 drm_modeset_unlock_all(dev); 16241 16242 for_each_intel_crtc(dev, crtc) { 16243 struct intel_initial_plane_config plane_config = {}; 16244 16245 if (!crtc->active) 16246 continue; 16247 16248 /* 16249 * Note that reserving the BIOS fb up front prevents us 16250 * from stuffing other stolen allocations like the ring 16251 * on top. This prevents some ugliness at boot time, and 16252 * can even allow for smooth boot transitions if the BIOS 16253 * fb is large enough for the active pipe configuration. 16254 */ 16255 dev_priv->display.get_initial_plane_config(crtc, 16256 &plane_config); 16257 16258 /* 16259 * If the fb is shared between multiple heads, we'll 16260 * just get the first one. 16261 */ 16262 intel_find_initial_plane_obj(crtc, &plane_config); 16263 } 16264 16265 /* 16266 * Make sure hardware watermarks really match the state we read out. 16267 * Note that we need to do this after reconstructing the BIOS fbs 16268 * since the watermark calculation done here will use pstate->fb. 16269 */ 16270 if (!HAS_GMCH(dev_priv)) 16271 sanitize_watermarks(dev); 16272 16273 /* 16274 * Force all active planes to recompute their states, so that on 16275 * mode_setcrtc after probe all the intel_plane_state variables 16276 * are already calculated and there are no assert_plane warnings 16277 * during bootup. 16278 */ 16279 ret = intel_initial_commit(dev); 16280 if (ret) 16281 DRM_DEBUG_KMS("Initial commit in probe failed.\n"); 16282 16283 return 0; 16284 } 16285 16286 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 16287 { 16288 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16289 /* 640x480@60Hz, ~25175 kHz */ 16290 struct dpll clock = { 16291 .m1 = 18, 16292 .m2 = 7, 16293 .p1 = 13, 16294 .p2 = 4, 16295 .n = 2, 16296 }; 16297 u32 dpll, fp; 16298 int i; 16299
16278 */ 16279 ret = intel_initial_commit(dev); 16280 if (ret) 16281 DRM_DEBUG_KMS("Initial commit in probe failed.\n"); 16282 16283 return 0; 16284 } 16285 16286 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 16287 { 16288 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16289 /* 640x480@60Hz, ~25175 kHz */ 16290 struct dpll clock = { 16291 .m1 = 18, 16292 .m2 = 7, 16293 .p1 = 13, 16294 .p2 = 4, 16295 .n = 2, 16296 }; 16297 u32 dpll, fp; 16298 int i; 16299 16300 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154); 16301 16302 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 16303 pipe_name(pipe), clock.vco, clock.dot); 16304 16305 fp = i9xx_dpll_compute_fp(&clock); 16306 dpll = DPLL_DVO_2X_MODE | 16307 DPLL_VGA_MODE_DIS | 16308 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 16309 PLL_P2_DIVIDE_BY_4 | 16310 PLL_REF_INPUT_DREFCLK | 16311 DPLL_VCO_ENABLE; 16312 16313 I915_WRITE(FP0(pipe), fp); 16314 I915_WRITE(FP1(pipe), fp); 16315 16316 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); 16317 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); 16318 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); 16319 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); 16320 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); 16321 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); 16322 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); 16323 16324 /* 16325 * Apparently we need to have VGA mode enabled prior to changing 16326 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 16327 * dividers, even though the register value does change. 16328 */ 16329 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); 16330 I915_WRITE(DPLL(pipe), dpll); 16331 16332 /* Wait for the clocks to stabilize. */ 16333 POSTING_READ(DPLL(pipe)); 16334 udelay(150); 16335 16336 /* The pixel multiplier can only be updated once the 16337 * DPLL is enabled and the clocks are stable. 16338 * 16339 * So write it again. 
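	 *
	 * (For reference, i9xx_calc_dpll_params() turns the dividers chosen
	 * above into m = 5 * (m1 + 2) + (m2 + 2) = 109,
	 * vco = 48000 * 109 / (n + 2) = 1308000 kHz and
	 * dot = 1308000 / (p1 * p2) = 25154 kHz, hence the WARN_ON() near
	 * the top of this function checking for 25154 rather than the
	 * nominal 25175 kHz VESA dot clock.)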
16340 */ 16341 I915_WRITE(DPLL(pipe), dpll); 16342 16343 /* We do this three times for luck */ 16344 for (i = 0; i < 3 ; i++) { 16345 I915_WRITE(DPLL(pipe), dpll); 16346 POSTING_READ(DPLL(pipe)); 16347 udelay(150); /* wait for warmup */ 16348 } 16349 16350 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE); 16351 POSTING_READ(PIPECONF(pipe)); 16352 16353 intel_wait_for_pipe_scanline_moving(crtc); 16354 } 16355 16356 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 16357 { 16358 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16359 16360 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", 16361 pipe_name(pipe)); 16362 16363 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE); 16364 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE); 16365 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE); 16366 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE); 16367 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE); 16368 16369 I915_WRITE(PIPECONF(pipe), 0); 16370 POSTING_READ(PIPECONF(pipe)); 16371 16372 intel_wait_for_pipe_scanline_stopped(crtc); 16373 16374 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); 16375 POSTING_READ(DPLL(pipe)); 16376 } 16377 16378 static void 16379 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) 16380 { 16381 struct intel_crtc *crtc; 16382 16383 if (INTEL_GEN(dev_priv) >= 4) 16384 return; 16385 16386 for_each_intel_crtc(&dev_priv->drm, crtc) { 16387 struct intel_plane *plane = 16388 to_intel_plane(crtc->base.primary); 16389 struct intel_crtc *plane_crtc; 16390 enum pipe pipe; 16391 16392 if (!plane->get_hw_state(plane, &pipe)) 16393 continue; 16394 16395 if (pipe == crtc->pipe) 16396 continue; 16397 16398 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", 16399 plane->base.base.id, plane->base.name); 16400 16401 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16402 intel_plane_disable_noatomic(plane_crtc, plane); 16403 } 16404 } 16405 16406 static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 16407 { 16408 struct drm_device *dev = crtc->base.dev; 16409 struct intel_encoder *encoder; 16410 16411 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 16412 return true; 16413 16414 return false; 16415 } 16416 16417 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) 16418 { 16419 struct drm_device *dev = encoder->base.dev; 16420 struct intel_connector *connector; 16421 16422 for_each_connector_on_encoder(dev, &encoder->base, connector) 16423 return connector; 16424 16425 return NULL; 16426 } 16427 16428 static bool has_pch_trancoder(struct drm_i915_private *dev_priv, 16429 enum pipe pch_transcoder) 16430 { 16431 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 16432 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A); 16433 } 16434 16435 static void intel_sanitize_crtc(struct intel_crtc *crtc, 16436 struct drm_modeset_acquire_ctx *ctx) 16437 { 16438 struct drm_device *dev = crtc->base.dev; 16439 struct drm_i915_private *dev_priv = to_i915(dev); 16440 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); 16441 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 16442 16443 /* Clear any frame start delays used for debugging left by the BIOS */ 16444 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) { 16445 i915_reg_t reg = PIPECONF(cpu_transcoder); 16446 16447 I915_WRITE(reg, 16448 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 16449 } 16450 16451 if 
(crtc_state->base.active) {
		struct intel_plane *plane;

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/*
	 * Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders.
	 */
	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting, which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc, with the crtc for pipe A housing the
		 * underrun reporting state for PCH transcoder A, the crtc
		 * for pipe B housing it for PCH transcoder B, etc. LPT-H
		 * has only PCH transcoder A, and marking underrun reporting
		 * as disabled for the non-existent PCH transcoders B and C
		 * would prevent enabling the south error interrupt (see
		 * cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}

static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/*
	 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
	 * the hardware when a high res display is plugged in. The DPLL P
	 * divider is zero, and the pipe timings are bonkers. We'll
	 * try to disable everything in that case.
	 *
	 * FIXME would be nice to be able to sanitize this state
	 * without several WARNs, but for now let's take the easy
	 * road.
	 */
	return IS_GEN(dev_priv, 6) &&
		crtc_state->base.active &&
		crtc_state->shared_dpll &&
		crtc_state->port_clock == 0;
}

static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/*
	 * We need to check both for a crtc link (meaning that the encoder
	 * is active and trying to read from a pipe) and the pipe itself
	 * being active.
	 */
	bool has_active_crtc = crtc_state &&
		crtc_state->base.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/*
		 * The connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again.
		 */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/*
		 * Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions, or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default.
		 */
		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}

void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
{
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev_priv);
	}
}

void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/*
	 * This function can be called either from intel_modeset_setup_hw_state
	 * or at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses.
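	 *
	 * (Hence the intel_display_power_get_if_enabled() below: it hands
	 * back a wakeref only if the VGA power domain is already up; if it
	 * isn't, the VGA plane can't be running and there is nothing to
	 * sanitize.)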
16617 */ 16618 wakeref = intel_display_power_get_if_enabled(dev_priv, 16619 POWER_DOMAIN_VGA); 16620 if (!wakeref) 16621 return; 16622 16623 i915_redisable_vga_power_on(dev_priv); 16624 16625 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref); 16626 } 16627 16628 /* FIXME read out full plane state for all planes */ 16629 static void readout_plane_state(struct drm_i915_private *dev_priv) 16630 { 16631 struct intel_plane *plane; 16632 struct intel_crtc *crtc; 16633 16634 for_each_intel_plane(&dev_priv->drm, plane) { 16635 struct intel_plane_state *plane_state = 16636 to_intel_plane_state(plane->base.state); 16637 struct intel_crtc_state *crtc_state; 16638 enum pipe pipe = PIPE_A; 16639 bool visible; 16640 16641 visible = plane->get_hw_state(plane, &pipe); 16642 16643 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16644 crtc_state = to_intel_crtc_state(crtc->base.state); 16645 16646 intel_set_plane_visible(crtc_state, plane_state, visible); 16647 16648 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n", 16649 plane->base.base.id, plane->base.name, 16650 enableddisabled(visible), pipe_name(pipe)); 16651 } 16652 16653 for_each_intel_crtc(&dev_priv->drm, crtc) { 16654 struct intel_crtc_state *crtc_state = 16655 to_intel_crtc_state(crtc->base.state); 16656 16657 fixup_active_planes(crtc_state); 16658 } 16659 } 16660 16661 static void intel_modeset_readout_hw_state(struct drm_device *dev) 16662 { 16663 struct drm_i915_private *dev_priv = to_i915(dev); 16664 enum pipe pipe; 16665 struct intel_crtc *crtc; 16666 struct intel_encoder *encoder; 16667 struct intel_connector *connector; 16668 struct drm_connector_list_iter conn_iter; 16669 int i; 16670 16671 dev_priv->active_crtcs = 0; 16672 16673 for_each_intel_crtc(dev, crtc) { 16674 struct intel_crtc_state *crtc_state = 16675 to_intel_crtc_state(crtc->base.state); 16676 16677 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); 16678 memset(crtc_state, 0, sizeof(*crtc_state)); 16679 __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base); 16680 16681 crtc_state->base.active = crtc_state->base.enable = 16682 dev_priv->display.get_pipe_config(crtc, crtc_state); 16683 16684 crtc->base.enabled = crtc_state->base.enable; 16685 crtc->active = crtc_state->base.active; 16686 16687 if (crtc_state->base.active) 16688 dev_priv->active_crtcs |= 1 << crtc->pipe; 16689 16690 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", 16691 crtc->base.base.id, crtc->base.name, 16692 enableddisabled(crtc_state->base.active)); 16693 } 16694 16695 readout_plane_state(dev_priv); 16696 16697 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 16698 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 16699 16700 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll, 16701 &pll->state.hw_state); 16702 16703 if (IS_ELKHARTLAKE(dev_priv) && pll->on && 16704 pll->info->id == DPLL_ID_EHL_DPLL4) { 16705 pll->wakeref = intel_display_power_get(dev_priv, 16706 POWER_DOMAIN_DPLL_DC_OFF); 16707 } 16708 16709 pll->state.crtc_mask = 0; 16710 for_each_intel_crtc(dev, crtc) { 16711 struct intel_crtc_state *crtc_state = 16712 to_intel_crtc_state(crtc->base.state); 16713 16714 if (crtc_state->base.active && 16715 crtc_state->shared_dpll == pll) 16716 pll->state.crtc_mask |= 1 << crtc->pipe; 16717 } 16718 pll->active_mask = pll->state.crtc_mask; 16719 16720 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 16721 pll->info->name, pll->state.crtc_mask, pll->on); 16722 } 16723 16724 for_each_intel_encoder(dev, encoder) { 16725 pipe = 0; 16726 
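		/*
		 * pipe defaults to 0 (PIPE_A) so the debug print below has a
		 * defined value even when get_hw_state() reports the encoder
		 * as disabled.
		 */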
16727 if (encoder->get_hw_state(encoder, &pipe)) { 16728 struct intel_crtc_state *crtc_state; 16729 16730 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16731 crtc_state = to_intel_crtc_state(crtc->base.state); 16732 16733 encoder->base.crtc = &crtc->base; 16734 encoder->get_config(encoder, crtc_state); 16735 } else { 16736 encoder->base.crtc = NULL; 16737 } 16738 16739 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 16740 encoder->base.base.id, encoder->base.name, 16741 enableddisabled(encoder->base.crtc), 16742 pipe_name(pipe)); 16743 } 16744 16745 drm_connector_list_iter_begin(dev, &conn_iter); 16746 for_each_intel_connector_iter(connector, &conn_iter) { 16747 if (connector->get_hw_state(connector)) { 16748 connector->base.dpms = DRM_MODE_DPMS_ON; 16749 16750 encoder = connector->encoder; 16751 connector->base.encoder = &encoder->base; 16752 16753 if (encoder->base.crtc && 16754 encoder->base.crtc->state->active) { 16755 /* 16756 * This has to be done during hardware readout 16757 * because anything calling .crtc_disable may 16758 * rely on the connector_mask being accurate. 16759 */ 16760 encoder->base.crtc->state->connector_mask |= 16761 drm_connector_mask(&connector->base); 16762 encoder->base.crtc->state->encoder_mask |= 16763 drm_encoder_mask(&encoder->base); 16764 } 16765 16766 } else { 16767 connector->base.dpms = DRM_MODE_DPMS_OFF; 16768 connector->base.encoder = NULL; 16769 } 16770 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 16771 connector->base.base.id, connector->base.name, 16772 enableddisabled(connector->base.encoder)); 16773 } 16774 drm_connector_list_iter_end(&conn_iter); 16775 16776 for_each_intel_crtc(dev, crtc) { 16777 struct intel_bw_state *bw_state = 16778 to_intel_bw_state(dev_priv->bw_obj.state); 16779 struct intel_crtc_state *crtc_state = 16780 to_intel_crtc_state(crtc->base.state); 16781 struct intel_plane *plane; 16782 int min_cdclk = 0; 16783 16784 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 16785 if (crtc_state->base.active) { 16786 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state); 16787 crtc->base.mode.hdisplay = crtc_state->pipe_src_w; 16788 crtc->base.mode.vdisplay = crtc_state->pipe_src_h; 16789 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state); 16790 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); 16791 16792 /* 16793 * The initial mode needs to be set in order to keep 16794 * the atomic core happy. It wants a valid mode if the 16795 * crtc's enabled, so we do the above call. 16796 * 16797 * But we don't set all the derived state fully, hence 16798 * set a flag to indicate that a full recalculation is 16799 * needed on the next commit. 
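			 *
			 * (That flag is the I915_MODE_FLAG_INHERITED private
			 * mode flag set just below.)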
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc_state);
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->base.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
		}

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}

static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_crtc_state *crtc_state;

		if (!encoder->get_power_domains)
			continue;

		/*
		 * MST-primary and inactive encoders don't have a crtc state,
		 * and neither of them requires any power domain references.
		 */
		if (!encoder->base.crtc)
			continue;

		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
		encoder->get_power_domains(encoder, crtc_state);
	}
}

static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}

static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
				       enum port port, i915_reg_t hdmi_reg)
{
	u32 val = I915_READ(hdmi_reg);

	if (val & SDVO_ENABLE ||
	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
		      port_name(port));

	val &= ~SDVO_PIPE_SEL_MASK;
	val |= SDVO_PIPE_SEL(PIPE_A);

	I915_WRITE(hdmi_reg, val);
}

static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
				     enum port port, i915_reg_t dp_reg)
{
	u32 val = I915_READ(dp_reg);

	if (val & DP_PORT_EN ||
	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
		      port_name(port));

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(PIPE_A);

	I915_WRITE(dp_reg, val);
}

static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplexes with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}

/*
 * Scan out the current hw modeset state and sanitize it to the
 * current state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
16977 */ 16978 for_each_intel_crtc(&dev_priv->drm, crtc) { 16979 crtc_state = to_intel_crtc_state(crtc->base.state); 16980 16981 drm_crtc_vblank_reset(&crtc->base); 16982 16983 if (crtc_state->base.active) 16984 intel_crtc_vblank_on(crtc_state); 16985 } 16986 16987 intel_sanitize_plane_mapping(dev_priv); 16988 16989 for_each_intel_encoder(dev, encoder) 16990 intel_sanitize_encoder(encoder); 16991 16992 for_each_intel_crtc(&dev_priv->drm, crtc) { 16993 crtc_state = to_intel_crtc_state(crtc->base.state); 16994 intel_sanitize_crtc(crtc, ctx); 16995 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]"); 16996 } 16997 16998 intel_modeset_update_connector_atomic_state(dev); 16999 17000 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 17001 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 17002 17003 if (!pll->on || pll->active_mask) 17004 continue; 17005 17006 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", 17007 pll->info->name); 17008 17009 pll->info->funcs->disable(dev_priv, pll); 17010 pll->on = false; 17011 } 17012 17013 if (IS_G4X(dev_priv)) { 17014 g4x_wm_get_hw_state(dev_priv); 17015 g4x_wm_sanitize(dev_priv); 17016 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 17017 vlv_wm_get_hw_state(dev_priv); 17018 vlv_wm_sanitize(dev_priv); 17019 } else if (INTEL_GEN(dev_priv) >= 9) { 17020 skl_wm_get_hw_state(dev_priv); 17021 } else if (HAS_PCH_SPLIT(dev_priv)) { 17022 ilk_wm_get_hw_state(dev_priv); 17023 } 17024 17025 for_each_intel_crtc(dev, crtc) { 17026 u64 put_domains; 17027 17028 crtc_state = to_intel_crtc_state(crtc->base.state); 17029 put_domains = modeset_get_crtc_power_domains(crtc_state); 17030 if (WARN_ON(put_domains)) 17031 modeset_put_power_domains(dev_priv, put_domains); 17032 } 17033 17034 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); 17035 17036 intel_fbc_init_pipe_state(dev_priv); 17037 } 17038 17039 void intel_display_resume(struct drm_device *dev) 17040 { 17041 struct drm_i915_private *dev_priv = to_i915(dev); 17042 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 17043 struct drm_modeset_acquire_ctx ctx; 17044 int ret; 17045 17046 dev_priv->modeset_restore_state = NULL; 17047 if (state) 17048 state->acquire_ctx = &ctx; 17049 17050 drm_modeset_acquire_init(&ctx, 0); 17051 17052 while (1) { 17053 ret = drm_modeset_lock_all_ctx(dev, &ctx); 17054 if (ret != -EDEADLK) 17055 break; 17056 17057 drm_modeset_backoff(&ctx); 17058 } 17059 17060 if (!ret) 17061 ret = __intel_display_resume(dev, state, &ctx); 17062 17063 intel_enable_ipc(dev_priv); 17064 drm_modeset_drop_locks(&ctx); 17065 drm_modeset_acquire_fini(&ctx); 17066 17067 if (ret) 17068 DRM_ERROR("Restoring old state failed with %i\n", ret); 17069 if (state) 17070 drm_atomic_state_put(state); 17071 } 17072 17073 static void intel_hpd_poll_fini(struct drm_device *dev) 17074 { 17075 struct intel_connector *connector; 17076 struct drm_connector_list_iter conn_iter; 17077 17078 /* Kill all the work that may have been queued by hpd. 
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

void intel_modeset_driver_remove(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	flush_workqueue(dev_priv->modeset_wq);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Shut down interrupts and polling as the very first thing to avoid
	 * creating havoc. Too much stuff here (turning off connectors, ...)
	 * would experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(dev_priv);

	drm_mode_config_cleanup(dev);

	intel_overlay_cleanup(dev_priv);

	intel_gmbus_teardown(dev_priv);

	destroy_workqueue(dev_priv->modeset_wq);

	intel_fbc_cleanup_cfb(dev_priv);
}

/*
 * Set the VGA decode state - true == enable VGA decode.
 */
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
	unsigned int reg = INTEL_GEN(dev_priv) >= 6 ?
SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 17142 u16 gmch_ctrl; 17143 17144 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { 17145 DRM_ERROR("failed to read control word\n"); 17146 return -EIO; 17147 } 17148 17149 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state) 17150 return 0; 17151 17152 if (state) 17153 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 17154 else 17155 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 17156 17157 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) { 17158 DRM_ERROR("failed to write control word\n"); 17159 return -EIO; 17160 } 17161 17162 return 0; 17163 } 17164 17165 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 17166 17167 struct intel_display_error_state { 17168 17169 u32 power_well_driver; 17170 17171 struct intel_cursor_error_state { 17172 u32 control; 17173 u32 position; 17174 u32 base; 17175 u32 size; 17176 } cursor[I915_MAX_PIPES]; 17177 17178 struct intel_pipe_error_state { 17179 bool power_domain_on; 17180 u32 source; 17181 u32 stat; 17182 } pipe[I915_MAX_PIPES]; 17183 17184 struct intel_plane_error_state { 17185 u32 control; 17186 u32 stride; 17187 u32 size; 17188 u32 pos; 17189 u32 addr; 17190 u32 surface; 17191 u32 tile_offset; 17192 } plane[I915_MAX_PIPES]; 17193 17194 struct intel_transcoder_error_state { 17195 bool available; 17196 bool power_domain_on; 17197 enum transcoder cpu_transcoder; 17198 17199 u32 conf; 17200 17201 u32 htotal; 17202 u32 hblank; 17203 u32 hsync; 17204 u32 vtotal; 17205 u32 vblank; 17206 u32 vsync; 17207 } transcoder[5]; 17208 }; 17209 17210 struct intel_display_error_state * 17211 intel_display_capture_error_state(struct drm_i915_private *dev_priv) 17212 { 17213 struct intel_display_error_state *error; 17214 int transcoders[] = { 17215 TRANSCODER_A, 17216 TRANSCODER_B, 17217 TRANSCODER_C, 17218 TRANSCODER_D, 17219 TRANSCODER_EDP, 17220 }; 17221 int i; 17222 17223 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder)); 17224 17225 if (!HAS_DISPLAY(dev_priv)) 17226 return NULL; 17227 17228 error = kzalloc(sizeof(*error), GFP_ATOMIC); 17229 if (error == NULL) 17230 return NULL; 17231 17232 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 17233 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2); 17234 17235 for_each_pipe(dev_priv, i) { 17236 error->pipe[i].power_domain_on = 17237 __intel_display_power_is_enabled(dev_priv, 17238 POWER_DOMAIN_PIPE(i)); 17239 if (!error->pipe[i].power_domain_on) 17240 continue; 17241 17242 error->cursor[i].control = I915_READ(CURCNTR(i)); 17243 error->cursor[i].position = I915_READ(CURPOS(i)); 17244 error->cursor[i].base = I915_READ(CURBASE(i)); 17245 17246 error->plane[i].control = I915_READ(DSPCNTR(i)); 17247 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 17248 if (INTEL_GEN(dev_priv) <= 3) { 17249 error->plane[i].size = I915_READ(DSPSIZE(i)); 17250 error->plane[i].pos = I915_READ(DSPPOS(i)); 17251 } 17252 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 17253 error->plane[i].addr = I915_READ(DSPADDR(i)); 17254 if (INTEL_GEN(dev_priv) >= 4) { 17255 error->plane[i].surface = I915_READ(DSPSURF(i)); 17256 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 17257 } 17258 17259 error->pipe[i].source = I915_READ(PIPESRC(i)); 17260 17261 if (HAS_GMCH(dev_priv)) 17262 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 17263 } 17264 17265 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { 17266 enum transcoder cpu_transcoder = transcoders[i]; 17267 17268 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder]) 17269 continue; 17270 17271 
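		/*
		 * The transcoder exists on this platform, so its registers
		 * are safe to capture.
		 */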
error->transcoder[i].available = true; 17272 error->transcoder[i].power_domain_on = 17273 __intel_display_power_is_enabled(dev_priv, 17274 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 17275 if (!error->transcoder[i].power_domain_on) 17276 continue; 17277 17278 error->transcoder[i].cpu_transcoder = cpu_transcoder; 17279 17280 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 17281 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 17282 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 17283 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 17284 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 17285 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 17286 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 17287 } 17288 17289 return error; 17290 } 17291 17292 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 17293 17294 void 17295 intel_display_print_error_state(struct drm_i915_error_state_buf *m, 17296 struct intel_display_error_state *error) 17297 { 17298 struct drm_i915_private *dev_priv = m->i915; 17299 int i; 17300 17301 if (!error) 17302 return; 17303 17304 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes); 17305 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 17306 err_printf(m, "PWR_WELL_CTL2: %08x\n", 17307 error->power_well_driver); 17308 for_each_pipe(dev_priv, i) { 17309 err_printf(m, "Pipe [%d]:\n", i); 17310 err_printf(m, " Power: %s\n", 17311 onoff(error->pipe[i].power_domain_on)); 17312 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 17313 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 17314 17315 err_printf(m, "Plane [%d]:\n", i); 17316 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 17317 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 17318 if (INTEL_GEN(dev_priv) <= 3) { 17319 err_printf(m, " SIZE: %08x\n", error->plane[i].size); 17320 err_printf(m, " POS: %08x\n", error->plane[i].pos); 17321 } 17322 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 17323 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 17324 if (INTEL_GEN(dev_priv) >= 4) { 17325 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 17326 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 17327 } 17328 17329 err_printf(m, "Cursor [%d]:\n", i); 17330 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 17331 err_printf(m, " POS: %08x\n", error->cursor[i].position); 17332 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 17333 } 17334 17335 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { 17336 if (!error->transcoder[i].available) 17337 continue; 17338 17339 err_printf(m, "CPU transcoder: %s\n", 17340 transcoder_name(error->transcoder[i].cpu_transcoder)); 17341 err_printf(m, " Power: %s\n", 17342 onoff(error->transcoder[i].power_domain_on)); 17343 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 17344 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 17345 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 17346 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 17347 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 17348 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 17349 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 17350 } 17351 } 17352 17353 #endif 17354
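
/*
 * Usage sketch for the error state helpers above (illustrative only; the
 * real call sites live in the error capture code, e.g. i915_gpu_error.c):
 *
 *	struct intel_display_error_state *display;
 *
 *	display = intel_display_capture_error_state(dev_priv);
 *	...
 *	intel_display_print_error_state(m, display);
 *	kfree(display);
 *
 * intel_display_capture_error_state() can return NULL (no display hardware,
 * or allocation failure), which intel_display_print_error_state() tolerates
 * via its NULL check, so the pair can be used without extra guards.
 */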