/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
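
/*
 * Note: the cursor planes only expose a single linear ARGB8888
 * format/modifier pair on these platforms; anything fancier is
 * presumably expected to go through a regular plane instead.
 */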

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

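	/*
	 * The CZ domain clock is presumably derived from the same HPLL
	 * reference as the other CCK clocks, hence the shared readout
	 * helper above.
	 */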
	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};
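
/*
 * In the LVDS tables below the p2 divider is keyed off the channel mode
 * (single vs. dual link) rather than a dot clock threshold, which is why
 * dot_limit is 0 and p2_slow == p2_fast; see i9xx_select_p2_div().
 */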

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
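
/*
 * Worked example (illustrative register-style values, not from any BIOS):
 * with a 120000 kHz refclk, m1 = 12, m2 = 7, n = 1, p1 = 2 and p2 = 10,
 * i9xx_calc_dpll_params() yields m = 5 * (12 + 2) + (7 + 2) = 79,
 * vco = 120000 * 79 / (1 + 2) = 3160000 kHz and
 * dot = 3160000 / (2 * 10) = 158000 kHz, all inside the Ironlake DAC
 * limits above.
 */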

/* LVDS 100MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->base);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
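
/*
 * Note that on VLV/CHV the fast clock is 5x the pipe pixel rate: the
 * helpers below keep that fast clock in clock->dot and return
 * clock->dot / 5 as the pipe clock rate.
 */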

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 always to 2.
	 * If we ever need to support a 200 MHz refclk we'll have to revisit
	 * this because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX / clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

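	/*
	 * Gen4+ exposes a live "pipe active" status bit in PIPECONF that we
	 * can simply poll; older platforms have no such bit, so the best we
	 * can do is wait for the scanline counter to stop moving.
	 */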
	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
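
/*
 * The panel power sequencer write-protects a number of display registers
 * (presumably including the DPLL ones; see the "PLL is protected by panel"
 * comments in the PLL enable paths below), so callers check that the panel
 * registers are unlocked before poking them.
 */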
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
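
/*
 * PCH transcoder/port asserts. Note that IBX apparently keeps a stale
 * transcoder B selection in the port registers even while a port is
 * disabled, which is what the "still using transcoder B" warnings below
 * are trying to catch.
 */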
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}

static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (INTEL_GEN(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}

static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}

static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on().
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	return IS_GEN(dev_priv, 2) ? 2048 : 4096;
}
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}

static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
{
	return intel_tile_size(to_i915(fb->dev)) /
		intel_tile_width_bytes(fb, color_plane);
}

/* Return the tile dimensions in pixel units */
static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
			    unsigned int *tile_width,
			    unsigned int *tile_height)
{
	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
	unsigned int cpp = fb->format->cpp[color_plane];

	*tile_width = tile_width_bytes / cpp;
	*tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
}

unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	unsigned int tile_height = intel_tile_height(fb, color_plane);

	return ALIGN(height, tile_height);
}
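/*
 * Worked example (illustrative values, not tied to any one platform;
 * this helper exists only for exposition and is never called): the
 * tile geometry above is just the fixed byte dimensions divided
 * through by the bytes per pixel. For a gen4+ X-tile (4096-byte tile,
 * 512-byte rows) scanning out XRGB8888:
 */
static inline void intel_tile_dims_example(void)
{
	unsigned int tile_size = 4096;		/* bytes per tile (gen4+) */
	unsigned int tile_width_bytes = 512;	/* X-tile row width in bytes */
	unsigned int cpp = 4;			/* XRGB8888 */

	/* 512 / 4 = 128 pixels wide, 4096 / 512 = 8 rows tall */
	unsigned int tile_width = tile_width_bytes / cpp;
	unsigned int tile_height = tile_size / tile_width_bytes;

	(void)tile_width;
	(void)tile_height;
}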
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].width * rot_info->plane[i].height;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
		size += rem_info->plane[i].width * rem_info->plane[i].height;

	return size;
}

static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
			const struct drm_framebuffer *fb,
			unsigned int rotation)
{
	view->type = I915_GGTT_VIEW_NORMAL;
	if (drm_rotation_90_or_270(rotation)) {
		view->type = I915_GGTT_VIEW_ROTATED;
		view->rotated = to_intel_framebuffer(fb)->rot_info;
	}
}

static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;
	else if (IS_I85X(dev_priv))
		return 256;
	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;
	else
		return 4 * 1024;
}

static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;
	else
		return 0;
}

static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if (color_plane == 1)
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}

static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return INTEL_GEN(dev_priv) < 4 ||
	       (plane->has_fbc &&
		plane_state->view.type == I915_GGTT_VIEW_NORMAL);
}

struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually
	 * forward writes to a chunk of system memory, which means that
	 * there is no risk that the register values disappear as soon as
	 * we call intel_runtime_pm_put(), so it is correct to wrap only
	 * the pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	i915_gem_object_lock(obj);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Let's presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;
		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression. For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	i915_gem_object_unlock(obj);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}

void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}

static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
			  unsigned int rotation)
{
	if (drm_rotation_90_or_270(rotation))
		return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
	else
		return fb->pitches[color_plane];
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->base.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->color_plane[color_plane].stride;

	return y * pitch + x * cpp;
}
/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->color_plane[color_plane].x;
	*y += state->color_plane[color_plane].y;
}

static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}

static bool is_surface_linear(u64 modifier, int color_plane)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}

static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}

/*
 * Adjust the tile offset by moving the difference into
 * the x/y offsets.
 */
static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
					     const struct intel_plane_state *state,
					     int color_plane,
					     u32 old_offset, u32 new_offset)
{
	return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
					   state->base.rotation,
					   state->color_plane[color_plane].stride,
					   old_offset, new_offset);
}
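/*
 * Worked example of the tile walk above (illustrative values only;
 * exposition helper, never called): rebasing an offset of 23 tiles
 * back to 0 with a pitch of 10 tiles folds two full tile rows plus
 * three tiles into the x/y offsets.
 */
static inline void intel_adjust_tile_offset_example(void)
{
	unsigned int tile_size = 4096, tile_width = 128, tile_height = 8;
	unsigned int pitch_tiles = 10;
	u32 old_offset = 23 * tile_size;
	u32 new_offset = 0;
	int x = 5, y = 3;	/* intra-tile coordinates */

	unsigned int tiles = (old_offset - new_offset) / tile_size; /* 23 */

	y += tiles / pitch_tiles * tile_height;	/* y = 3 + 2 * 8 = 19 */
	x += tiles % pitch_tiles * tile_width;	/* x = 5 + 3 * 128 = 389 */

	(void)x;
	(void)y;
}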
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. Bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (alignment)
		alignment--;

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}

static u32 intel_plane_compute_aligned_offset(int *x, int *y,
					      const struct intel_plane_state *state,
					      int color_plane)
{
	struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	const struct drm_framebuffer *fb = state->base.fb;
	unsigned int rotation = state->base.rotation;
	int pitch = state->color_plane[color_plane].stride;
	u32 alignment;

	if (intel_plane->id == PLANE_CURSOR)
		alignment = intel_cursor_alignment(dev_priv);
	else
		alignment = intel_surf_alignment(fb, color_plane);

	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
					    pitch, rotation, alignment);
}

/* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
	    fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
			      fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
			      fb->offsets[color_plane], fb->pitches[color_plane],
			      color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
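/*
 * Worked example of the linear branch above (illustrative values;
 * exposition helper, never called): splitting a byte offset into a
 * 4 KiB aligned base plus an x/y remainder, for a 1920-pixel-wide
 * XRGB8888 framebuffer.
 */
static inline void intel_linear_offset_split_example(void)
{
	unsigned int pitch = 1920 * 4, cpp = 4;
	u32 alignment = 4096 - 1;	/* mask form, as above */
	int x = 100, y = 10;

	u32 offset = y * pitch + x * cpp;		/* 77200 */
	u32 offset_aligned = offset & ~alignment;	/* 73728 */

	y = (offset & alignment) / pitch;		/* 3472 / 7680 = 0 */
	x = ((offset & alignment) - y * pitch) / cpp;	/* 3472 / 4 = 868 */

	/* 73728 + 0 * 7680 + 868 * 4 == 77200, the original offset */
	(void)offset_aligned;
}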
static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case I915_FORMAT_MOD_X_TILED:
		return I915_TILING_X;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return I915_TILING_Y;
	default:
		return I915_TILING_NONE;
	}
}

/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache-line pairs refer to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
static const struct drm_format_info ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

static const struct drm_format_info *
intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	switch (cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return lookup_format_info(ccs_formats,
					  ARRAY_SIZE(ccs_formats),
					  cmd->pixel_format);
	default:
		return NULL;
	}
}

bool is_ccs_modifier(u64 modifier)
{
	return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
}
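/*
 * Sanity check of the ratio quoted above (pure arithmetic; exposition
 * helper, never called): an 8x16 pixel area of a 4-byte-per-pixel main
 * surface is 512 bytes, i.e. eight 64-byte cache lines or four
 * cache-line pairs. At 2 bits of compression state per pair that is
 * exactly one CCS byte, matching the hsub = 8 / vsub = 16 / cpp = 1
 * layout of ccs_formats[] above.
 */
static inline void intel_ccs_ratio_example(void)
{
	unsigned int hsub = 8, vsub = 16, cpp = 4;

	unsigned int main_bytes = hsub * vsub * cpp;	/* 512 */
	unsigned int pairs = main_bytes / 64 / 2;	/* 4 */
	unsigned int ccs_bits = pairs * 2;		/* 8 bits = 1 byte */

	(void)ccs_bits;
}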
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all.
	 */
	crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

static
u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
			u32 pixel_format, u64 modifier)
{
	/*
	 * Arbitrary limit for gen4+ chosen to match the
	 * render engine max stride.
	 *
	 * The new CCS hash mode makes remapping impossible.
	 */
	if (!is_ccs_modifier(modifier)) {
		if (INTEL_GEN(dev_priv) >= 7)
			return 256*1024;
		else if (INTEL_GEN(dev_priv) >= 4)
			return 128*1024;
	}

	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
}

static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride)
			return intel_tile_size(dev_priv);
		else
			return 64;
	} else {
		return intel_tile_width_bytes(fb, color_plane);
	}
}

bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int i;

	/* We don't want to deal with remapping with cursors */
	if (plane->id == PLANE_CURSOR)
		return false;

	/*
	 * The display engine limits already match/exceed the
	 * render engine limits, so not much point in remapping.
	 * Would also need to deal with the fence POT alignment
	 * and gen2 2KiB GTT tile size.
	 */
	if (INTEL_GEN(dev_priv) < 4)
		return false;

	/*
	 * The new CCS hash mode isn't compatible with remapping as
	 * the virtual address of the pages affects the compressed data.
	 */
	if (is_ccs_modifier(fb->modifier))
		return false;

	/* Linear needs a page aligned stride for remapping */
	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
		unsigned int alignment = intel_tile_size(dev_priv) - 1;

		for (i = 0; i < fb->format->num_planes; i++) {
			if (fb->pitches[i] & alignment)
				return false;
		}
	}

	return true;
}

static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 stride, max_stride;

	/*
	 * No remapping for invisible planes since we don't have
	 * an actual source viewport to remap.
	 */
	if (!plane_state->base.visible)
		return false;

	if (!intel_plane_can_remap(plane_state))
		return false;

	/*
	 * FIXME: aux plane limits on gen9+ are
	 * unclear in Bspec, for now no checking.
	 */
	stride = intel_fb_pitch(fb, 0, rotation);
	max_stride = plane->max_stride(plane, fb->format->format,
				       fb->modifier, rotation);

	return stride > max_stride;
}

static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the
			 * intra CCS tile x/y offsets must match between CCS and
			 * the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb->modifier, i)) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
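/*
 * Worked example of the rotated-view bookkeeping above (illustrative
 * values; exposition helper, never called): a 1920x1080 XRGB8888 fb,
 * assuming 128-byte Y-tiling as on most platforms, i.e. 32x32 pixel
 * tiles at 4 bytes per pixel.
 */
static inline void intel_rotated_view_example(void)
{
	unsigned int cpp = 4;
	unsigned int tile_width = 128 / cpp;	/* 32 pixels */
	unsigned int tile_height = 4096 / 128;	/* 32 rows */
	unsigned int width = 1920, height = 1080;
	unsigned int x = 0, y = 0;

	unsigned int width_tiles = DIV_ROUND_UP(x + width, tile_width);	/* 60 */
	unsigned int height_tiles = DIV_ROUND_UP(y + height, tile_height); /* 34 */

	/* the rotated mapping's pitch is the tile aligned fb height */
	unsigned int rotated_pitch = height_tiles * tile_height;	/* 1088 */

	(void)width_tiles;
	(void)rotated_pitch;
}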
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->base.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	src_x = plane_state->base.src.x1 >> 16;
	src_y = plane_state->base.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->base.src) >> 16;
	src_h = drm_rect_height(&plane_state->base.src) >> 16;

	WARN_ON(is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->base.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}

static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->base.fb);
	unsigned int rotation = plane_state->base.rotation;
	int i, num_planes;

	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( This can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}
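/*
 * Note on the ">> 16" pattern used throughout (exposition helper,
 * never called; values are assumed): plane source coordinates are
 * kept in 16.16 fixed point, so shifting right by 16 recovers the
 * integer pixel position.
 */
static inline void intel_fixed_point_src_example(void)
{
	int src_x1 = 100 << 16;			/* 100.0 */
	int src_x2 = (739 << 16) | 0x8000;	/* 739.5 */

	int x = src_x1 >> 16;			/* 100 */
	int w = (src_x2 - src_x1) >> 16;	/* 639 (fraction truncated) */

	(void)x;
	(void)w;
}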
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	case PLANE_CTL_FORMAT_NV12:
		return DRM_FORMAT_NV12;
	case PLANE_CTL_FORMAT_P010:
		return DRM_FORMAT_P010;
	case PLANE_CTL_FORMAT_P012:
		return DRM_FORMAT_P012;
	case PLANE_CTL_FORMAT_P016:
		return DRM_FORMAT_P016;
	case PLANE_CTL_FORMAT_Y210:
		return DRM_FORMAT_Y210;
	case PLANE_CTL_FORMAT_Y212:
		return DRM_FORMAT_Y212;
	case PLANE_CTL_FORMAT_Y216:
		return DRM_FORMAT_Y216;
	case PLANE_CTL_FORMAT_Y410:
		return DRM_FORMAT_XVYU2101010;
	case PLANE_CTL_FORMAT_Y412:
		return DRM_FORMAT_XVYU12_16161616;
	case PLANE_CTL_FORMAT_Y416:
		return DRM_FORMAT_XVYU16161616;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	case PLANE_CTL_FORMAT_XRGB_16161616F:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR16161616F;
			else
				return DRM_FORMAT_XBGR16161616F;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB16161616F;
			else
				return DRM_FORMAT_XRGB16161616F;
		}
	}
}

static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);
	struct drm_i915_gem_object *obj;
	bool ret = false;

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
				 fb->modifier);
		return false;
	}

	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	mutex_unlock(&dev->struct_mutex);
	if (!obj)
		return false;

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto out;
	}

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out;
	}

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	ret = true;
out:
	i915_gem_object_put(obj);
	return ret;
}

static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			struct intel_plane_state *plane_state,
			bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);

	plane_state->base.visible = visible;

	if (visible)
		crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
}
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->base.plane_mask)
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}

static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	crtc_state->data_rate[plane->id] = 0;

	if (plane->id == PLANE_PRIMARY)
		intel_pre_disable_primary_noatomic(&crtc->base);

	intel_disable_plane(plane, crtc_state);
}

static struct intel_frontbuffer *
to_intel_frontbuffer(struct drm_framebuffer *fb)
{
	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->base.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->base.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->base.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->base.rotation);

	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}

static int skl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		/*
		 * Validated limit is 4k, but 5k should work apart
		 * from the following features:
		 * - Ytile (already limited to 4k)
		 * - FP16 (already limited to 4k)
		 * - render compression (already limited to 4k)
		 * - KVMR sprite and cursor (don't care)
		 * - horizontal panning (TODO verify this)
		 * - pipe and plane scaling (TODO verify this)
		 */
		if (cpp == 8)
			return 4096;
		else
			return 5120;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* FIXME AUX plane? */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (cpp == 8)
			return 2048;
		else
			return 4096;
	default:
		MISSING_CASE(fb->modifier);
		return 2048;
	}
}
static int glk_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		if (cpp == 8)
			return 4096;
		else
			return 5120;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* FIXME AUX plane? */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (cpp == 8)
			return 2048;
		else
			return 5120;
	default:
		MISSING_CASE(fb->modifier);
		return 2048;
	}
}

static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}

static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}

static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width;
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceeds the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested.
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate(&plane_state->base.src,
			   (x << 16) - plane_state->base.src.x1,
			   (y << 16) - plane_state->base.src.y1);

	return 0;
}

static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}

static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int x = src_x / hsub;
	int y = src_y / vsub;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
	plane_state->color_plane[1].y = y * vsub + src_y % vsub;

	return 0;
}
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->base.visible)
		return 0;

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (is_planar_yuv_format(fb->format->format)) {
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		plane_state->color_plane[1].offset = ~0xfff;
		plane_state->color_plane[1].x = 0;
		plane_state->color_plane[1].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}

unsigned int
i9xx_plane_max_stride(struct intel_plane *plane,
		      u32 pixel_format, u64 modifier,
		      unsigned int rotation)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	if (!HAS_GMCH(dev_priv)) {
		return 32*1024;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		if (modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		if (plane->i9xx_plane == PLANE_C)
			return 4*1024;
		else
			return 8*1024;
	}
}

static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dspcntr = 0;

	if (crtc_state->gamma_enable)
		dspcntr |= DISPPLANE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	return dspcntr;
}

static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
struct drm_i915_private *dev_priv = 3668 to_i915(plane_state->base.plane->dev); 3669 int src_x, src_y; 3670 u32 offset; 3671 int ret; 3672 3673 ret = intel_plane_compute_gtt(plane_state); 3674 if (ret) 3675 return ret; 3676 3677 if (!plane_state->base.visible) 3678 return 0; 3679 3680 src_x = plane_state->base.src.x1 >> 16; 3681 src_y = plane_state->base.src.y1 >> 16; 3682 3683 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 3684 3685 if (INTEL_GEN(dev_priv) >= 4) 3686 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 3687 plane_state, 0); 3688 else 3689 offset = 0; 3690 3691 /* 3692 * Put the final coordinates back so that the src 3693 * coordinate checks will see the right values. 3694 */ 3695 drm_rect_translate(&plane_state->base.src, 3696 (src_x << 16) - plane_state->base.src.x1, 3697 (src_y << 16) - plane_state->base.src.y1); 3698 3699 /* HSW/BDW do this automagically in hardware */ 3700 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 3701 unsigned int rotation = plane_state->base.rotation; 3702 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3703 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3704 3705 if (rotation & DRM_MODE_ROTATE_180) { 3706 src_x += src_w - 1; 3707 src_y += src_h - 1; 3708 } else if (rotation & DRM_MODE_REFLECT_X) { 3709 src_x += src_w - 1; 3710 } 3711 } 3712 3713 plane_state->color_plane[0].offset = offset; 3714 plane_state->color_plane[0].x = src_x; 3715 plane_state->color_plane[0].y = src_y; 3716 3717 return 0; 3718 } 3719 3720 static bool i9xx_plane_has_windowing(struct intel_plane *plane) 3721 { 3722 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3723 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3724 3725 if (IS_CHERRYVIEW(dev_priv)) 3726 return i9xx_plane == PLANE_B; 3727 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 3728 return false; 3729 else if (IS_GEN(dev_priv, 4)) 3730 return i9xx_plane == PLANE_C; 3731 else 3732 return i9xx_plane == PLANE_B || 3733 i9xx_plane == PLANE_C; 3734 } 3735 3736 static int 3737 i9xx_plane_check(struct intel_crtc_state *crtc_state, 3738 struct intel_plane_state *plane_state) 3739 { 3740 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 3741 int ret; 3742 3743 ret = chv_plane_check_rotation(plane_state); 3744 if (ret) 3745 return ret; 3746 3747 ret = drm_atomic_helper_check_plane_state(&plane_state->base, 3748 &crtc_state->base, 3749 DRM_PLANE_HELPER_NO_SCALING, 3750 DRM_PLANE_HELPER_NO_SCALING, 3751 i9xx_plane_has_windowing(plane), 3752 true); 3753 if (ret) 3754 return ret; 3755 3756 ret = i9xx_check_plane_surface(plane_state); 3757 if (ret) 3758 return ret; 3759 3760 if (!plane_state->base.visible) 3761 return 0; 3762 3763 ret = intel_plane_check_src_coordinates(plane_state); 3764 if (ret) 3765 return ret; 3766 3767 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); 3768 3769 return 0; 3770 } 3771 3772 static void i9xx_update_plane(struct intel_plane *plane, 3773 const struct intel_crtc_state *crtc_state, 3774 const struct intel_plane_state *plane_state) 3775 { 3776 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3777 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3778 u32 linear_offset; 3779 int x = plane_state->color_plane[0].x; 3780 int y = plane_state->color_plane[0].y; 3781 int crtc_x = plane_state->base.dst.x1; 3782 int crtc_y = plane_state->base.dst.y1; 3783 int crtc_w = drm_rect_width(&plane_state->base.dst); 3784 int crtc_h = drm_rect_height(&plane_state->base.dst); 3785 unsigned long irqflags; 
3786 u32 dspaddr_offset; 3787 u32 dspcntr; 3788 3789 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); 3790 3791 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 3792 3793 if (INTEL_GEN(dev_priv) >= 4) 3794 dspaddr_offset = plane_state->color_plane[0].offset; 3795 else 3796 dspaddr_offset = linear_offset; 3797 3798 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3799 3800 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); 3801 3802 if (INTEL_GEN(dev_priv) < 4) { 3803 /* 3804 * PLANE_A doesn't actually have a full window 3805 * generator but let's assume we still need to 3806 * program whatever is there. 3807 */ 3808 I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 3809 I915_WRITE_FW(DSPSIZE(i9xx_plane), 3810 ((crtc_h - 1) << 16) | (crtc_w - 1)); 3811 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { 3812 I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 3813 I915_WRITE_FW(PRIMSIZE(i9xx_plane), 3814 ((crtc_h - 1) << 16) | (crtc_w - 1)); 3815 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); 3816 } 3817 3818 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3819 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x); 3820 } else if (INTEL_GEN(dev_priv) >= 4) { 3821 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset); 3822 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x); 3823 } 3824 3825 /* 3826 * The control register self-arms if the plane was previously 3827 * disabled. Try to make the plane enable atomic by writing 3828 * the control register just before the surface register. 3829 */ 3830 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 3831 if (INTEL_GEN(dev_priv) >= 4) 3832 I915_WRITE_FW(DSPSURF(i9xx_plane), 3833 intel_plane_ggtt_offset(plane_state) + 3834 dspaddr_offset); 3835 else 3836 I915_WRITE_FW(DSPADDR(i9xx_plane), 3837 intel_plane_ggtt_offset(plane_state) + 3838 dspaddr_offset); 3839 3840 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3841 } 3842 3843 static void i9xx_disable_plane(struct intel_plane *plane, 3844 const struct intel_crtc_state *crtc_state) 3845 { 3846 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3847 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3848 unsigned long irqflags; 3849 u32 dspcntr; 3850 3851 /* 3852 * DSPCNTR pipe gamma enable on g4x+ and pipe csc 3853 * enable on ilk+ affect the pipe bottom color as 3854 * well, so we must configure them even if the plane 3855 * is disabled. 3856 * 3857 * On pre-g4x there is no way to gamma correct the 3858 * pipe bottom color but we'll keep on doing this 3859 * anyway so that the crtc state readout works correctly. 3860 */ 3861 dspcntr = i9xx_plane_ctl_crtc(crtc_state); 3862 3863 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3864 3865 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 3866 if (INTEL_GEN(dev_priv) >= 4) 3867 I915_WRITE_FW(DSPSURF(i9xx_plane), 0); 3868 else 3869 I915_WRITE_FW(DSPADDR(i9xx_plane), 0); 3870 3871 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3872 } 3873 3874 static bool i9xx_plane_get_hw_state(struct intel_plane *plane, 3875 enum pipe *pipe) 3876 { 3877 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3878 enum intel_display_power_domain power_domain; 3879 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3880 intel_wakeref_t wakeref; 3881 bool ret; 3882 u32 val; 3883 3884 /* 3885 * Not 100% correct for planes that can move between pipes, 3886 * but that's only the case for gen2-4 which don't have any 3887 * display power wells. 
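 * Concretely: the DSPCNTR read below is performed under the power
 * domain of the plane's nominal pipe, while on gen4 and earlier the
 * actual pipe is decoded from the DISPPLANE_SEL_PIPE bits.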
3888 */ 3889 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 3890 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 3891 if (!wakeref) 3892 return false; 3893 3894 val = I915_READ(DSPCNTR(i9xx_plane)); 3895 3896 ret = val & DISPLAY_PLANE_ENABLE; 3897 3898 if (INTEL_GEN(dev_priv) >= 5) 3899 *pipe = plane->pipe; 3900 else 3901 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 3902 DISPPLANE_SEL_PIPE_SHIFT; 3903 3904 intel_display_power_put(dev_priv, power_domain, wakeref); 3905 3906 return ret; 3907 } 3908 3909 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 3910 { 3911 struct drm_device *dev = intel_crtc->base.dev; 3912 struct drm_i915_private *dev_priv = to_i915(dev); 3913 3914 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 3915 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 3916 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 3917 } 3918 3919 /* 3920 * This function detaches (aka. unbinds) unused scalers in hardware 3921 */ 3922 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) 3923 { 3924 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 3925 const struct intel_crtc_scaler_state *scaler_state = 3926 &crtc_state->scaler_state; 3927 int i; 3928 3929 /* loop through and disable scalers that aren't in use */ 3930 for (i = 0; i < intel_crtc->num_scalers; i++) { 3931 if (!scaler_state->scalers[i].in_use) 3932 skl_detach_scaler(intel_crtc, i); 3933 } 3934 } 3935 3936 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, 3937 int color_plane, unsigned int rotation) 3938 { 3939 /* 3940 * The stride is either expressed as a multiple of 64 bytes chunks for 3941 * linear buffers or in number of tiles for tiled buffers. 3942 */ 3943 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) 3944 return 64; 3945 else if (drm_rotation_90_or_270(rotation)) 3946 return intel_tile_height(fb, color_plane); 3947 else 3948 return intel_tile_width_bytes(fb, color_plane); 3949 } 3950 3951 u32 skl_plane_stride(const struct intel_plane_state *plane_state, 3952 int color_plane) 3953 { 3954 const struct drm_framebuffer *fb = plane_state->base.fb; 3955 unsigned int rotation = plane_state->base.rotation; 3956 u32 stride = plane_state->color_plane[color_plane].stride; 3957 3958 if (color_plane >= fb->format->num_planes) 3959 return 0; 3960 3961 return stride / skl_plane_stride_mult(fb, color_plane, rotation); 3962 } 3963 3964 static u32 skl_plane_ctl_format(u32 pixel_format) 3965 { 3966 switch (pixel_format) { 3967 case DRM_FORMAT_C8: 3968 return PLANE_CTL_FORMAT_INDEXED; 3969 case DRM_FORMAT_RGB565: 3970 return PLANE_CTL_FORMAT_RGB_565; 3971 case DRM_FORMAT_XBGR8888: 3972 case DRM_FORMAT_ABGR8888: 3973 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 3974 case DRM_FORMAT_XRGB8888: 3975 case DRM_FORMAT_ARGB8888: 3976 return PLANE_CTL_FORMAT_XRGB_8888; 3977 case DRM_FORMAT_XBGR2101010: 3978 return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; 3979 case DRM_FORMAT_XRGB2101010: 3980 return PLANE_CTL_FORMAT_XRGB_2101010; 3981 case DRM_FORMAT_XBGR16161616F: 3982 case DRM_FORMAT_ABGR16161616F: 3983 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; 3984 case DRM_FORMAT_XRGB16161616F: 3985 case DRM_FORMAT_ARGB16161616F: 3986 return PLANE_CTL_FORMAT_XRGB_16161616F; 3987 case DRM_FORMAT_YUYV: 3988 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 3989 case DRM_FORMAT_YVYU: 3990 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 3991 case DRM_FORMAT_UYVY: 3992 return PLANE_CTL_FORMAT_YUV422 | 
PLANE_CTL_YUV422_UYVY; 3993 case DRM_FORMAT_VYUY: 3994 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 3995 case DRM_FORMAT_NV12: 3996 return PLANE_CTL_FORMAT_NV12; 3997 case DRM_FORMAT_P010: 3998 return PLANE_CTL_FORMAT_P010; 3999 case DRM_FORMAT_P012: 4000 return PLANE_CTL_FORMAT_P012; 4001 case DRM_FORMAT_P016: 4002 return PLANE_CTL_FORMAT_P016; 4003 case DRM_FORMAT_Y210: 4004 return PLANE_CTL_FORMAT_Y210; 4005 case DRM_FORMAT_Y212: 4006 return PLANE_CTL_FORMAT_Y212; 4007 case DRM_FORMAT_Y216: 4008 return PLANE_CTL_FORMAT_Y216; 4009 case DRM_FORMAT_XVYU2101010: 4010 return PLANE_CTL_FORMAT_Y410; 4011 case DRM_FORMAT_XVYU12_16161616: 4012 return PLANE_CTL_FORMAT_Y412; 4013 case DRM_FORMAT_XVYU16161616: 4014 return PLANE_CTL_FORMAT_Y416; 4015 default: 4016 MISSING_CASE(pixel_format); 4017 } 4018 4019 return 0; 4020 } 4021 4022 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) 4023 { 4024 if (!plane_state->base.fb->format->has_alpha) 4025 return PLANE_CTL_ALPHA_DISABLE; 4026 4027 switch (plane_state->base.pixel_blend_mode) { 4028 case DRM_MODE_BLEND_PIXEL_NONE: 4029 return PLANE_CTL_ALPHA_DISABLE; 4030 case DRM_MODE_BLEND_PREMULTI: 4031 return PLANE_CTL_ALPHA_SW_PREMULTIPLY; 4032 case DRM_MODE_BLEND_COVERAGE: 4033 return PLANE_CTL_ALPHA_HW_PREMULTIPLY; 4034 default: 4035 MISSING_CASE(plane_state->base.pixel_blend_mode); 4036 return PLANE_CTL_ALPHA_DISABLE; 4037 } 4038 } 4039 4040 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) 4041 { 4042 if (!plane_state->base.fb->format->has_alpha) 4043 return PLANE_COLOR_ALPHA_DISABLE; 4044 4045 switch (plane_state->base.pixel_blend_mode) { 4046 case DRM_MODE_BLEND_PIXEL_NONE: 4047 return PLANE_COLOR_ALPHA_DISABLE; 4048 case DRM_MODE_BLEND_PREMULTI: 4049 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; 4050 case DRM_MODE_BLEND_COVERAGE: 4051 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY; 4052 default: 4053 MISSING_CASE(plane_state->base.pixel_blend_mode); 4054 return PLANE_COLOR_ALPHA_DISABLE; 4055 } 4056 } 4057 4058 static u32 skl_plane_ctl_tiling(u64 fb_modifier) 4059 { 4060 switch (fb_modifier) { 4061 case DRM_FORMAT_MOD_LINEAR: 4062 break; 4063 case I915_FORMAT_MOD_X_TILED: 4064 return PLANE_CTL_TILED_X; 4065 case I915_FORMAT_MOD_Y_TILED: 4066 return PLANE_CTL_TILED_Y; 4067 case I915_FORMAT_MOD_Y_TILED_CCS: 4068 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; 4069 case I915_FORMAT_MOD_Yf_TILED: 4070 return PLANE_CTL_TILED_YF; 4071 case I915_FORMAT_MOD_Yf_TILED_CCS: 4072 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; 4073 default: 4074 MISSING_CASE(fb_modifier); 4075 } 4076 4077 return 0; 4078 } 4079 4080 static u32 skl_plane_ctl_rotate(unsigned int rotate) 4081 { 4082 switch (rotate) { 4083 case DRM_MODE_ROTATE_0: 4084 break; 4085 /* 4086 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr, 4087 * while i915 HW rotation is clockwise; that's why the values are swapped here.
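 * For example, a DRM_MODE_ROTATE_90 request (90 degrees
 * counter-clockwise) is programmed as PLANE_CTL_ROTATE_270
 * (270 degrees clockwise) below, and vice versa.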
4088 */ 4089 case DRM_MODE_ROTATE_90: 4090 return PLANE_CTL_ROTATE_270; 4091 case DRM_MODE_ROTATE_180: 4092 return PLANE_CTL_ROTATE_180; 4093 case DRM_MODE_ROTATE_270: 4094 return PLANE_CTL_ROTATE_90; 4095 default: 4096 MISSING_CASE(rotate); 4097 } 4098 4099 return 0; 4100 } 4101 4102 static u32 cnl_plane_ctl_flip(unsigned int reflect) 4103 { 4104 switch (reflect) { 4105 case 0: 4106 break; 4107 case DRM_MODE_REFLECT_X: 4108 return PLANE_CTL_FLIP_HORIZONTAL; 4109 case DRM_MODE_REFLECT_Y: 4110 default: 4111 MISSING_CASE(reflect); 4112 } 4113 4114 return 0; 4115 } 4116 4117 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 4118 { 4119 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 4120 u32 plane_ctl = 0; 4121 4122 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 4123 return plane_ctl; 4124 4125 if (crtc_state->gamma_enable) 4126 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; 4127 4128 if (crtc_state->csc_enable) 4129 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; 4130 4131 return plane_ctl; 4132 } 4133 4134 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, 4135 const struct intel_plane_state *plane_state) 4136 { 4137 struct drm_i915_private *dev_priv = 4138 to_i915(plane_state->base.plane->dev); 4139 const struct drm_framebuffer *fb = plane_state->base.fb; 4140 unsigned int rotation = plane_state->base.rotation; 4141 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 4142 u32 plane_ctl; 4143 4144 plane_ctl = PLANE_CTL_ENABLE; 4145 4146 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { 4147 plane_ctl |= skl_plane_ctl_alpha(plane_state); 4148 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; 4149 4150 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) 4151 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709; 4152 4153 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4154 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE; 4155 } 4156 4157 plane_ctl |= skl_plane_ctl_format(fb->format->format); 4158 plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 4159 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK); 4160 4161 if (INTEL_GEN(dev_priv) >= 10) 4162 plane_ctl |= cnl_plane_ctl_flip(rotation & 4163 DRM_MODE_REFLECT_MASK); 4164 4165 if (key->flags & I915_SET_COLORKEY_DESTINATION) 4166 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; 4167 else if (key->flags & I915_SET_COLORKEY_SOURCE) 4168 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 4169 4170 return plane_ctl; 4171 } 4172 4173 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) 4174 { 4175 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 4176 u32 plane_color_ctl = 0; 4177 4178 if (INTEL_GEN(dev_priv) >= 11) 4179 return plane_color_ctl; 4180 4181 if (crtc_state->gamma_enable) 4182 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; 4183 4184 if (crtc_state->csc_enable) 4185 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; 4186 4187 return plane_color_ctl; 4188 } 4189 4190 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, 4191 const struct intel_plane_state *plane_state) 4192 { 4193 struct drm_i915_private *dev_priv = 4194 to_i915(plane_state->base.plane->dev); 4195 const struct drm_framebuffer *fb = plane_state->base.fb; 4196 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 4197 u32 plane_color_ctl = 0; 4198 4199 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; 4200 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); 4201 4202 if (fb->format->is_yuv && 
!icl_is_hdr_plane(dev_priv, plane->id)) { 4203 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) 4204 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; 4205 else 4206 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709; 4207 4208 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4209 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 4210 } else if (fb->format->is_yuv) { 4211 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; 4212 } 4213 4214 return plane_color_ctl; 4215 } 4216 4217 static int 4218 __intel_display_resume(struct drm_device *dev, 4219 struct drm_atomic_state *state, 4220 struct drm_modeset_acquire_ctx *ctx) 4221 { 4222 struct drm_crtc_state *crtc_state; 4223 struct drm_crtc *crtc; 4224 int i, ret; 4225 4226 intel_modeset_setup_hw_state(dev, ctx); 4227 i915_redisable_vga(to_i915(dev)); 4228 4229 if (!state) 4230 return 0; 4231 4232 /* 4233 * We've duplicated the state, pointers to the old state are invalid. 4234 * 4235 * Don't attempt to use the old state until we commit the duplicated state. 4236 */ 4237 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 4238 /* 4239 * Force recalculation even if we restore 4240 * current state. With fast modeset this may not result 4241 * in a modeset when the state is compatible. 4242 */ 4243 crtc_state->mode_changed = true; 4244 } 4245 4246 /* ignore any reset values/BIOS leftovers in the WM registers */ 4247 if (!HAS_GMCH(to_i915(dev))) 4248 to_intel_atomic_state(state)->skip_intermediate_wm = true; 4249 4250 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 4251 4252 WARN_ON(ret == -EDEADLK); 4253 return ret; 4254 } 4255 4256 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 4257 { 4258 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && 4259 intel_has_gpu_reset(dev_priv)); 4260 } 4261 4262 void intel_prepare_reset(struct drm_i915_private *dev_priv) 4263 { 4264 struct drm_device *dev = &dev_priv->drm; 4265 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4266 struct drm_atomic_state *state; 4267 int ret; 4268 4269 /* reset doesn't touch the display */ 4270 if (!i915_modparams.force_reset_modeset_test && 4271 !gpu_reset_clobbers_display(dev_priv)) 4272 return; 4273 4274 /* We have a modeset vs reset deadlock, defensively unbreak it. */ 4275 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4276 smp_mb__after_atomic(); 4277 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); 4278 4279 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { 4280 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n"); 4281 intel_gt_set_wedged(&dev_priv->gt); 4282 } 4283 4284 /* 4285 * Need mode_config.mutex so that we don't 4286 * trample ongoing ->detect() and whatnot. 4287 */ 4288 mutex_lock(&dev->mode_config.mutex); 4289 drm_modeset_acquire_init(ctx, 0); 4290 while (1) { 4291 ret = drm_modeset_lock_all_ctx(dev, ctx); 4292 if (ret != -EDEADLK) 4293 break; 4294 4295 drm_modeset_backoff(ctx); 4296 } 4297 /* 4298 * Disabling the crtcs gracefully seems nicer. Also the 4299 * g33 docs say we should at least disable all the planes. 
4300 */ 4301 state = drm_atomic_helper_duplicate_state(dev, ctx); 4302 if (IS_ERR(state)) { 4303 ret = PTR_ERR(state); 4304 DRM_ERROR("Duplicating state failed with %i\n", ret); 4305 return; 4306 } 4307 4308 ret = drm_atomic_helper_disable_all(dev, ctx); 4309 if (ret) { 4310 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 4311 drm_atomic_state_put(state); 4312 return; 4313 } 4314 4315 dev_priv->modeset_restore_state = state; 4316 state->acquire_ctx = ctx; 4317 } 4318 4319 void intel_finish_reset(struct drm_i915_private *dev_priv) 4320 { 4321 struct drm_device *dev = &dev_priv->drm; 4322 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4323 struct drm_atomic_state *state; 4324 int ret; 4325 4326 /* reset doesn't touch the display */ 4327 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 4328 return; 4329 4330 state = fetch_and_zero(&dev_priv->modeset_restore_state); 4331 if (!state) 4332 goto unlock; 4333 4334 /* reset doesn't touch the display */ 4335 if (!gpu_reset_clobbers_display(dev_priv)) { 4336 /* for testing only restore the display */ 4337 ret = __intel_display_resume(dev, state, ctx); 4338 if (ret) 4339 DRM_ERROR("Restoring old state failed with %i\n", ret); 4340 } else { 4341 /* 4342 * The display has been reset as well, 4343 * so need a full re-initialization. 4344 */ 4345 intel_pps_unlock_regs_wa(dev_priv); 4346 intel_modeset_init_hw(dev); 4347 intel_init_clock_gating(dev_priv); 4348 4349 spin_lock_irq(&dev_priv->irq_lock); 4350 if (dev_priv->display.hpd_irq_setup) 4351 dev_priv->display.hpd_irq_setup(dev_priv); 4352 spin_unlock_irq(&dev_priv->irq_lock); 4353 4354 ret = __intel_display_resume(dev, state, ctx); 4355 if (ret) 4356 DRM_ERROR("Restoring old state failed with %i\n", ret); 4357 4358 intel_hpd_init(dev_priv); 4359 } 4360 4361 drm_atomic_state_put(state); 4362 unlock: 4363 drm_modeset_drop_locks(ctx); 4364 drm_modeset_acquire_fini(ctx); 4365 mutex_unlock(&dev->mode_config.mutex); 4366 4367 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4368 } 4369 4370 static void icl_set_pipe_chicken(struct intel_crtc *crtc) 4371 { 4372 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4373 enum pipe pipe = crtc->pipe; 4374 u32 tmp; 4375 4376 tmp = I915_READ(PIPE_CHICKEN(pipe)); 4377 4378 /* 4379 * Display WA #1153: icl 4380 * enable hardware to bypass the alpha math 4381 * and rounding for per-pixel values 00 and 0xff 4382 */ 4383 tmp |= PER_PIXEL_ALPHA_BYPASS_EN; 4384 /* 4385 * Display WA # 1605353570: icl 4386 * Set the pixel rounding bit to 1 for allowing 4387 * passthrough of Frame buffer pixels unmodified 4388 * across pipe 4389 */ 4390 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; 4391 I915_WRITE(PIPE_CHICKEN(pipe), tmp); 4392 } 4393 4394 static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state, 4395 const struct intel_crtc_state *new_crtc_state) 4396 { 4397 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 4398 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4399 4400 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ 4401 crtc->base.mode = new_crtc_state->base.mode; 4402 4403 /* 4404 * Update pipe size and adjust fitter if needed: the reason for this is 4405 * that in compute_mode_changes we check the native mode (not the pfit 4406 * mode) to see if we can flip rather than do a full mode set. 
In the 4407 * fastboot case, we'll flip, but if we don't update the pipesrc and 4408 * pfit state, we'll end up with a big fb scanned out into the wrong 4409 * sized surface. 4410 */ 4411 4412 I915_WRITE(PIPESRC(crtc->pipe), 4413 ((new_crtc_state->pipe_src_w - 1) << 16) | 4414 (new_crtc_state->pipe_src_h - 1)); 4415 4416 /* on skylake this is done by detaching scalers */ 4417 if (INTEL_GEN(dev_priv) >= 9) { 4418 skl_detach_scalers(new_crtc_state); 4419 4420 if (new_crtc_state->pch_pfit.enabled) 4421 skylake_pfit_enable(new_crtc_state); 4422 } else if (HAS_PCH_SPLIT(dev_priv)) { 4423 if (new_crtc_state->pch_pfit.enabled) 4424 ironlake_pfit_enable(new_crtc_state); 4425 else if (old_crtc_state->pch_pfit.enabled) 4426 ironlake_pfit_disable(old_crtc_state); 4427 } 4428 4429 if (INTEL_GEN(dev_priv) >= 11) 4430 icl_set_pipe_chicken(crtc); 4431 } 4432 4433 static void intel_fdi_normal_train(struct intel_crtc *crtc) 4434 { 4435 struct drm_device *dev = crtc->base.dev; 4436 struct drm_i915_private *dev_priv = to_i915(dev); 4437 int pipe = crtc->pipe; 4438 i915_reg_t reg; 4439 u32 temp; 4440 4441 /* enable normal train */ 4442 reg = FDI_TX_CTL(pipe); 4443 temp = I915_READ(reg); 4444 if (IS_IVYBRIDGE(dev_priv)) { 4445 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4446 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 4447 } else { 4448 temp &= ~FDI_LINK_TRAIN_NONE; 4449 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 4450 } 4451 I915_WRITE(reg, temp); 4452 4453 reg = FDI_RX_CTL(pipe); 4454 temp = I915_READ(reg); 4455 if (HAS_PCH_CPT(dev_priv)) { 4456 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4457 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 4458 } else { 4459 temp &= ~FDI_LINK_TRAIN_NONE; 4460 temp |= FDI_LINK_TRAIN_NONE; 4461 } 4462 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 4463 4464 /* wait one idle pattern time */ 4465 POSTING_READ(reg); 4466 udelay(1000); 4467 4468 /* IVB wants error correction enabled */ 4469 if (IS_IVYBRIDGE(dev_priv)) 4470 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 4471 FDI_FE_ERRC_ENABLE); 4472 } 4473 4474 /* The FDI link training functions for ILK/Ibexpeak. 
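 *
 * Rough shape of the sequence below: enable CPU FDI TX and PCH FDI RX
 * with training pattern 1 and poll FDI_RX_IIR for bit lock, then switch
 * both ends to pattern 2 and poll for symbol lock;
 * intel_fdi_normal_train() later takes the link to normal operation.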
*/ 4475 static void ironlake_fdi_link_train(struct intel_crtc *crtc, 4476 const struct intel_crtc_state *crtc_state) 4477 { 4478 struct drm_device *dev = crtc->base.dev; 4479 struct drm_i915_private *dev_priv = to_i915(dev); 4480 int pipe = crtc->pipe; 4481 i915_reg_t reg; 4482 u32 temp, tries; 4483 4484 /* FDI needs bits from pipe first */ 4485 assert_pipe_enabled(dev_priv, pipe); 4486 4487 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits 4488 for the train result */ 4489 reg = FDI_RX_IMR(pipe); 4490 temp = I915_READ(reg); 4491 temp &= ~FDI_RX_SYMBOL_LOCK; 4492 temp &= ~FDI_RX_BIT_LOCK; 4493 I915_WRITE(reg, temp); 4494 I915_READ(reg); 4495 udelay(150); 4496 4497 /* enable CPU FDI TX and PCH FDI RX */ 4498 reg = FDI_TX_CTL(pipe); 4499 temp = I915_READ(reg); 4500 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4501 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4502 temp &= ~FDI_LINK_TRAIN_NONE; 4503 temp |= FDI_LINK_TRAIN_PATTERN_1; 4504 I915_WRITE(reg, temp | FDI_TX_ENABLE); 4505 4506 reg = FDI_RX_CTL(pipe); 4507 temp = I915_READ(reg); 4508 temp &= ~FDI_LINK_TRAIN_NONE; 4509 temp |= FDI_LINK_TRAIN_PATTERN_1; 4510 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4511 4512 POSTING_READ(reg); 4513 udelay(150); 4514 4515 /* Ironlake workaround, enable clock pointer after FDI enable */ 4516 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 4517 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 4518 FDI_RX_PHASE_SYNC_POINTER_EN); 4519 4520 reg = FDI_RX_IIR(pipe); 4521 for (tries = 0; tries < 5; tries++) { 4522 temp = I915_READ(reg); 4523 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4524 4525 if ((temp & FDI_RX_BIT_LOCK)) { 4526 DRM_DEBUG_KMS("FDI train 1 done.\n"); 4527 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4528 break; 4529 } 4530 } 4531 if (tries == 5) 4532 DRM_ERROR("FDI train 1 fail!\n"); 4533 4534 /* Train 2 */ 4535 reg = FDI_TX_CTL(pipe); 4536 temp = I915_READ(reg); 4537 temp &= ~FDI_LINK_TRAIN_NONE; 4538 temp |= FDI_LINK_TRAIN_PATTERN_2; 4539 I915_WRITE(reg, temp); 4540 4541 reg = FDI_RX_CTL(pipe); 4542 temp = I915_READ(reg); 4543 temp &= ~FDI_LINK_TRAIN_NONE; 4544 temp |= FDI_LINK_TRAIN_PATTERN_2; 4545 I915_WRITE(reg, temp); 4546 4547 POSTING_READ(reg); 4548 udelay(150); 4549 4550 reg = FDI_RX_IIR(pipe); 4551 for (tries = 0; tries < 5; tries++) { 4552 temp = I915_READ(reg); 4553 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4554 4555 if (temp & FDI_RX_SYMBOL_LOCK) { 4556 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4557 DRM_DEBUG_KMS("FDI train 2 done.\n"); 4558 break; 4559 } 4560 } 4561 if (tries == 5) 4562 DRM_ERROR("FDI train 2 fail!\n"); 4563 4564 DRM_DEBUG_KMS("FDI train done\n"); 4565 4566 } 4567 4568 static const int snb_b_fdi_train_param[] = { 4569 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 4570 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 4571 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 4572 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 4573 }; 4574 4575 /* The FDI link training functions for SNB/Cougarpoint.
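 *
 * Same two-pattern scheme as the ILK version above, except each pattern
 * is retried across the snb_b_fdi_train_param[] voltage-swing/emphasis
 * levels until the receiver reports lock.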
*/ 4576 static void gen6_fdi_link_train(struct intel_crtc *crtc, 4577 const struct intel_crtc_state *crtc_state) 4578 { 4579 struct drm_device *dev = crtc->base.dev; 4580 struct drm_i915_private *dev_priv = to_i915(dev); 4581 int pipe = crtc->pipe; 4582 i915_reg_t reg; 4583 u32 temp, i, retry; 4584 4585 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits 4586 for the train result */ 4587 reg = FDI_RX_IMR(pipe); 4588 temp = I915_READ(reg); 4589 temp &= ~FDI_RX_SYMBOL_LOCK; 4590 temp &= ~FDI_RX_BIT_LOCK; 4591 I915_WRITE(reg, temp); 4592 4593 POSTING_READ(reg); 4594 udelay(150); 4595 4596 /* enable CPU FDI TX and PCH FDI RX */ 4597 reg = FDI_TX_CTL(pipe); 4598 temp = I915_READ(reg); 4599 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4600 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4601 temp &= ~FDI_LINK_TRAIN_NONE; 4602 temp |= FDI_LINK_TRAIN_PATTERN_1; 4603 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4604 /* SNB-B */ 4605 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 4606 I915_WRITE(reg, temp | FDI_TX_ENABLE); 4607 4608 I915_WRITE(FDI_RX_MISC(pipe), 4609 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 4610 4611 reg = FDI_RX_CTL(pipe); 4612 temp = I915_READ(reg); 4613 if (HAS_PCH_CPT(dev_priv)) { 4614 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4615 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4616 } else { 4617 temp &= ~FDI_LINK_TRAIN_NONE; 4618 temp |= FDI_LINK_TRAIN_PATTERN_1; 4619 } 4620 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4621 4622 POSTING_READ(reg); 4623 udelay(150); 4624 4625 for (i = 0; i < 4; i++) { 4626 reg = FDI_TX_CTL(pipe); 4627 temp = I915_READ(reg); 4628 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4629 temp |= snb_b_fdi_train_param[i]; 4630 I915_WRITE(reg, temp); 4631 4632 POSTING_READ(reg); 4633 udelay(500); 4634 4635 for (retry = 0; retry < 5; retry++) { 4636 reg = FDI_RX_IIR(pipe); 4637 temp = I915_READ(reg); 4638 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4639 if (temp & FDI_RX_BIT_LOCK) { 4640 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4641 DRM_DEBUG_KMS("FDI train 1 done.\n"); 4642 break; 4643 } 4644 udelay(50); 4645 } 4646 if (retry < 5) 4647 break; 4648 } 4649 if (i == 4) 4650 DRM_ERROR("FDI train 1 fail!\n"); 4651 4652 /* Train 2 */ 4653 reg = FDI_TX_CTL(pipe); 4654 temp = I915_READ(reg); 4655 temp &= ~FDI_LINK_TRAIN_NONE; 4656 temp |= FDI_LINK_TRAIN_PATTERN_2; 4657 if (IS_GEN(dev_priv, 6)) { 4658 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4659 /* SNB-B */ 4660 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 4661 } 4662 I915_WRITE(reg, temp); 4663 4664 reg = FDI_RX_CTL(pipe); 4665 temp = I915_READ(reg); 4666 if (HAS_PCH_CPT(dev_priv)) { 4667 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4668 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 4669 } else { 4670 temp &= ~FDI_LINK_TRAIN_NONE; 4671 temp |= FDI_LINK_TRAIN_PATTERN_2; 4672 } 4673 I915_WRITE(reg, temp); 4674 4675 POSTING_READ(reg); 4676 udelay(150); 4677 4678 for (i = 0; i < 4; i++) { 4679 reg = FDI_TX_CTL(pipe); 4680 temp = I915_READ(reg); 4681 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4682 temp |= snb_b_fdi_train_param[i]; 4683 I915_WRITE(reg, temp); 4684 4685 POSTING_READ(reg); 4686 udelay(500); 4687 4688 for (retry = 0; retry < 5; retry++) { 4689 reg = FDI_RX_IIR(pipe); 4690 temp = I915_READ(reg); 4691 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4692 if (temp & FDI_RX_SYMBOL_LOCK) { 4693 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4694 DRM_DEBUG_KMS("FDI train 2 done.\n"); 4695 break; 4696 } 4697 udelay(50); 4698 } 4699 if (retry < 5) 4700 break; 4701 } 4702 if (i == 4) 4703 DRM_ERROR("FDI train 2 fail!\n"); 4704 4705 DRM_DEBUG_KMS("FDI train done.\n"); 4706 } 4707 4708
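/*
 * The SNB and IVB trainers share the same retry idiom; a distilled
 * sketch (illustrative only; delays, CPT variants and error handling
 * omitted):
 *
 *	for (i = 0; i < ARRAY_SIZE(snb_b_fdi_train_param); i++) {
 *		temp = I915_READ(FDI_TX_CTL(pipe));
 *		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 *		I915_WRITE(FDI_TX_CTL(pipe), temp | snb_b_fdi_train_param[i]);
 *		if (I915_READ(FDI_RX_IIR(pipe)) & FDI_RX_BIT_LOCK)
 *			break; /* this vswing/emphasis level locked */
 *	}
 */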
/* Manual link training for Ivy Bridge A0 parts */ 4709 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, 4710 const struct intel_crtc_state *crtc_state) 4711 { 4712 struct drm_device *dev = crtc->base.dev; 4713 struct drm_i915_private *dev_priv = to_i915(dev); 4714 int pipe = crtc->pipe; 4715 i915_reg_t reg; 4716 u32 temp, i, j; 4717 4718 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits 4719 for the train result */ 4720 reg = FDI_RX_IMR(pipe); 4721 temp = I915_READ(reg); 4722 temp &= ~FDI_RX_SYMBOL_LOCK; 4723 temp &= ~FDI_RX_BIT_LOCK; 4724 I915_WRITE(reg, temp); 4725 4726 POSTING_READ(reg); 4727 udelay(150); 4728 4729 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 4730 I915_READ(FDI_RX_IIR(pipe))); 4731 4732 /* Try each vswing and preemphasis setting twice before moving on */ 4733 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { 4734 /* disable first in case we need to retry */ 4735 reg = FDI_TX_CTL(pipe); 4736 temp = I915_READ(reg); 4737 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 4738 temp &= ~FDI_TX_ENABLE; 4739 I915_WRITE(reg, temp); 4740 4741 reg = FDI_RX_CTL(pipe); 4742 temp = I915_READ(reg); 4743 temp &= ~FDI_LINK_TRAIN_AUTO; 4744 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4745 temp &= ~FDI_RX_ENABLE; 4746 I915_WRITE(reg, temp); 4747 4748 /* enable CPU FDI TX and PCH FDI RX */ 4749 reg = FDI_TX_CTL(pipe); 4750 temp = I915_READ(reg); 4751 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4752 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4753 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 4754 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4755 temp |= snb_b_fdi_train_param[j/2]; 4756 temp |= FDI_COMPOSITE_SYNC; 4757 I915_WRITE(reg, temp | FDI_TX_ENABLE); 4758 4759 I915_WRITE(FDI_RX_MISC(pipe), 4760 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 4761 4762 reg = FDI_RX_CTL(pipe); 4763 temp = I915_READ(reg); 4764 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4765 temp |= FDI_COMPOSITE_SYNC; 4766 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4767 4768 POSTING_READ(reg); 4769 udelay(1); /* should be 0.5us */ 4770 4771 for (i = 0; i < 4; i++) { 4772 reg = FDI_RX_IIR(pipe); 4773 temp = I915_READ(reg); 4774 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4775 4776 if (temp & FDI_RX_BIT_LOCK || 4777 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 4778 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4779 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", 4780 i); 4781 break; 4782 } 4783 udelay(1); /* should be 0.5us */ 4784 } 4785 if (i == 4) { 4786 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); 4787 continue; 4788 } 4789 4790 /* Train 2 */ 4791 reg = FDI_TX_CTL(pipe); 4792 temp = I915_READ(reg); 4793 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4794 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 4795 I915_WRITE(reg, temp); 4796 4797 reg = FDI_RX_CTL(pipe); 4798 temp = I915_READ(reg); 4799 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4800 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 4801 I915_WRITE(reg, temp); 4802 4803 POSTING_READ(reg); 4804 udelay(2); /* should be 1.5us */ 4805 4806 for (i = 0; i < 4; i++) { 4807 reg = FDI_RX_IIR(pipe); 4808 temp = I915_READ(reg); 4809 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4810 4811 if (temp & FDI_RX_SYMBOL_LOCK || 4812 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { 4813 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4814 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", 4815 i); 4816 goto train_done; 4817 } 4818 udelay(2); /* should be 1.5us */ 4819 } 4820 if (i == 4) 4821 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); 4822 } 4823 4824 train_done: 4825 DRM_DEBUG_KMS("FDI train done.\n");
4826 } 4827 4828 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) 4829 { 4830 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 4831 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 4832 int pipe = intel_crtc->pipe; 4833 i915_reg_t reg; 4834 u32 temp; 4835 4836 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 4837 reg = FDI_RX_CTL(pipe); 4838 temp = I915_READ(reg); 4839 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 4840 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4841 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4842 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 4843 4844 POSTING_READ(reg); 4845 udelay(200); 4846 4847 /* Switch from Rawclk to PCDclk */ 4848 temp = I915_READ(reg); 4849 I915_WRITE(reg, temp | FDI_PCDCLK); 4850 4851 POSTING_READ(reg); 4852 udelay(200); 4853 4854 /* Enable CPU FDI TX PLL, always on for Ironlake */ 4855 reg = FDI_TX_CTL(pipe); 4856 temp = I915_READ(reg); 4857 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 4858 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 4859 4860 POSTING_READ(reg); 4861 udelay(100); 4862 } 4863 } 4864 4865 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 4866 { 4867 struct drm_device *dev = intel_crtc->base.dev; 4868 struct drm_i915_private *dev_priv = to_i915(dev); 4869 int pipe = intel_crtc->pipe; 4870 i915_reg_t reg; 4871 u32 temp; 4872 4873 /* Switch from PCDclk to Rawclk */ 4874 reg = FDI_RX_CTL(pipe); 4875 temp = I915_READ(reg); 4876 I915_WRITE(reg, temp & ~FDI_PCDCLK); 4877 4878 /* Disable CPU FDI TX PLL */ 4879 reg = FDI_TX_CTL(pipe); 4880 temp = I915_READ(reg); 4881 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 4882 4883 POSTING_READ(reg); 4884 udelay(100); 4885 4886 reg = FDI_RX_CTL(pipe); 4887 temp = I915_READ(reg); 4888 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 4889 4890 /* Wait for the clocks to turn off. 
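 * (The POSTING_READ() and the 100 us delay below are what implement
 * this; there is no explicit status bit polled here.)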
*/ 4891 POSTING_READ(reg); 4892 udelay(100); 4893 } 4894 4895 static void ironlake_fdi_disable(struct drm_crtc *crtc) 4896 { 4897 struct drm_device *dev = crtc->dev; 4898 struct drm_i915_private *dev_priv = to_i915(dev); 4899 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4900 int pipe = intel_crtc->pipe; 4901 i915_reg_t reg; 4902 u32 temp; 4903 4904 /* disable CPU FDI tx and PCH FDI rx */ 4905 reg = FDI_TX_CTL(pipe); 4906 temp = I915_READ(reg); 4907 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 4908 POSTING_READ(reg); 4909 4910 reg = FDI_RX_CTL(pipe); 4911 temp = I915_READ(reg); 4912 temp &= ~(0x7 << 16); 4913 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4914 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 4915 4916 POSTING_READ(reg); 4917 udelay(100); 4918 4919 /* Ironlake workaround, disable clock pointer after downing FDI */ 4920 if (HAS_PCH_IBX(dev_priv)) 4921 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 4922 4923 /* still set train pattern 1 */ 4924 reg = FDI_TX_CTL(pipe); 4925 temp = I915_READ(reg); 4926 temp &= ~FDI_LINK_TRAIN_NONE; 4927 temp |= FDI_LINK_TRAIN_PATTERN_1; 4928 I915_WRITE(reg, temp); 4929 4930 reg = FDI_RX_CTL(pipe); 4931 temp = I915_READ(reg); 4932 if (HAS_PCH_CPT(dev_priv)) { 4933 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4934 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4935 } else { 4936 temp &= ~FDI_LINK_TRAIN_NONE; 4937 temp |= FDI_LINK_TRAIN_PATTERN_1; 4938 } 4939 /* BPC in FDI rx is consistent with that in PIPECONF */ 4940 temp &= ~(0x07 << 16); 4941 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4942 I915_WRITE(reg, temp); 4943 4944 POSTING_READ(reg); 4945 udelay(100); 4946 } 4947 4948 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 4949 { 4950 struct drm_crtc *crtc; 4951 bool cleanup_done; 4952 4953 drm_for_each_crtc(crtc, &dev_priv->drm) { 4954 struct drm_crtc_commit *commit; 4955 spin_lock(&crtc->commit_lock); 4956 commit = list_first_entry_or_null(&crtc->commit_list, 4957 struct drm_crtc_commit, commit_entry); 4958 cleanup_done = commit ? 4959 try_wait_for_completion(&commit->cleanup_done) : true; 4960 spin_unlock(&crtc->commit_lock); 4961 4962 if (cleanup_done) 4963 continue; 4964 4965 drm_crtc_wait_one_vblank(crtc); 4966 4967 return true; 4968 } 4969 4970 return false; 4971 } 4972 4973 void lpt_disable_iclkip(struct drm_i915_private *dev_priv) 4974 { 4975 u32 temp; 4976 4977 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); 4978 4979 mutex_lock(&dev_priv->sb_lock); 4980 4981 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 4982 temp |= SBI_SSCCTL_DISABLE; 4983 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 4984 4985 mutex_unlock(&dev_priv->sb_lock); 4986 } 4987 4988 /* Program iCLKIP clock to the desired frequency */ 4989 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) 4990 { 4991 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 4992 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4993 int clock = crtc_state->base.adjusted_mode.crtc_clock; 4994 u32 divsel, phaseinc, auxdiv, phasedir = 0; 4995 u32 temp; 4996 4997 lpt_disable_iclkip(dev_priv); 4998 4999 /* The iCLK virtual clock root frequency is in MHz, 5000 * but the adjusted_mode->crtc_clock is in KHz. To get the 5001 * divisors, it is necessary to divide one by another, so we 5002 * convert the virtual clock precision to KHz here for higher 5003 * precision.
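 *
 * Worked example (numbers illustrative, not from any spec table): for a
 * 148500 kHz pixel clock and auxdiv 0, the loop below computes
 * desired_divisor = DIV_ROUND_CLOSEST(172800000, 148500) = 1164, hence
 * divsel = 1164 / 64 - 2 = 16 and phaseinc = 1164 % 64 = 12; divsel
 * fits the 7-bit field, so the first iteration is used.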
5004 */ 5005 for (auxdiv = 0; auxdiv < 2; auxdiv++) { 5006 u32 iclk_virtual_root_freq = 172800 * 1000; 5007 u32 iclk_pi_range = 64; 5008 u32 desired_divisor; 5009 5010 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5011 clock << auxdiv); 5012 divsel = (desired_divisor / iclk_pi_range) - 2; 5013 phaseinc = desired_divisor % iclk_pi_range; 5014 5015 /* 5016 * Near 20MHz is a corner case which is 5017 * out of range for the 7-bit divisor 5018 */ 5019 if (divsel <= 0x7f) 5020 break; 5021 } 5022 5023 /* This should not happen with any sane values */ 5024 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 5025 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 5026 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & 5027 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 5028 5029 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 5030 clock, 5031 auxdiv, 5032 divsel, 5033 phasedir, 5034 phaseinc); 5035 5036 mutex_lock(&dev_priv->sb_lock); 5037 5038 /* Program SSCDIVINTPHASE6 */ 5039 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5040 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 5041 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 5042 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 5043 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 5044 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 5045 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 5046 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 5047 5048 /* Program SSCAUXDIV */ 5049 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5050 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 5051 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 5052 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 5053 5054 /* Enable modulator and associated divider */ 5055 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5056 temp &= ~SBI_SSCCTL_DISABLE; 5057 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 5058 5059 mutex_unlock(&dev_priv->sb_lock); 5060 5061 /* Wait for initialization time */ 5062 udelay(24); 5063 5064 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); 5065 } 5066 5067 int lpt_get_iclkip(struct drm_i915_private *dev_priv) 5068 { 5069 u32 divsel, phaseinc, auxdiv; 5070 u32 iclk_virtual_root_freq = 172800 * 1000; 5071 u32 iclk_pi_range = 64; 5072 u32 desired_divisor; 5073 u32 temp; 5074 5075 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) 5076 return 0; 5077 5078 mutex_lock(&dev_priv->sb_lock); 5079 5080 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5081 if (temp & SBI_SSCCTL_DISABLE) { 5082 mutex_unlock(&dev_priv->sb_lock); 5083 return 0; 5084 } 5085 5086 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5087 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> 5088 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; 5089 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> 5090 SBI_SSCDIVINTPHASE_INCVAL_SHIFT; 5091 5092 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5093 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> 5094 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; 5095 5096 mutex_unlock(&dev_priv->sb_lock); 5097 5098 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; 5099 5100 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5101 desired_divisor << auxdiv); 5102 } 5103 5104 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, 5105 enum pipe pch_transcoder) 5106 { 5107 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5108 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5109 enum transcoder cpu_transcoder = 
crtc_state->cpu_transcoder; 5110 5111 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 5112 I915_READ(HTOTAL(cpu_transcoder))); 5113 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder), 5114 I915_READ(HBLANK(cpu_transcoder))); 5115 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder), 5116 I915_READ(HSYNC(cpu_transcoder))); 5117 5118 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder), 5119 I915_READ(VTOTAL(cpu_transcoder))); 5120 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder), 5121 I915_READ(VBLANK(cpu_transcoder))); 5122 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder), 5123 I915_READ(VSYNC(cpu_transcoder))); 5124 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder), 5125 I915_READ(VSYNCSHIFT(cpu_transcoder))); 5126 } 5127 5128 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) 5129 { 5130 u32 temp; 5131 5132 temp = I915_READ(SOUTH_CHICKEN1); 5133 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 5134 return; 5135 5136 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 5137 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 5138 5139 temp &= ~FDI_BC_BIFURCATION_SELECT; 5140 if (enable) 5141 temp |= FDI_BC_BIFURCATION_SELECT; 5142 5143 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); 5144 I915_WRITE(SOUTH_CHICKEN1, temp); 5145 POSTING_READ(SOUTH_CHICKEN1); 5146 } 5147 5148 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) 5149 { 5150 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5151 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5152 5153 switch (crtc->pipe) { 5154 case PIPE_A: 5155 break; 5156 case PIPE_B: 5157 if (crtc_state->fdi_lanes > 2) 5158 cpt_set_fdi_bc_bifurcation(dev_priv, false); 5159 else 5160 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5161 5162 break; 5163 case PIPE_C: 5164 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5165 5166 break; 5167 default: 5168 BUG(); 5169 } 5170 } 5171 5172 /* 5173 * Finds the encoder associated with the given CRTC. This can only be 5174 * used when we know that the CRTC isn't feeding multiple encoders! 
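 * The WARN below fires if that assumption is violated (i.e. zero or
 * multiple encoders are attached to the pipe).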
5175 */ 5176 static struct intel_encoder * 5177 intel_get_crtc_new_encoder(const struct intel_atomic_state *state, 5178 const struct intel_crtc_state *crtc_state) 5179 { 5180 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5181 const struct drm_connector_state *connector_state; 5182 const struct drm_connector *connector; 5183 struct intel_encoder *encoder = NULL; 5184 int num_encoders = 0; 5185 int i; 5186 5187 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 5188 if (connector_state->crtc != &crtc->base) 5189 continue; 5190 5191 encoder = to_intel_encoder(connector_state->best_encoder); 5192 num_encoders++; 5193 } 5194 5195 WARN(num_encoders != 1, "%d encoders for pipe %c\n", 5196 num_encoders, pipe_name(crtc->pipe)); 5197 5198 return encoder; 5199 } 5200 5201 /* 5202 * Enable PCH resources required for PCH ports: 5203 * - PCH PLLs 5204 * - FDI training & RX/TX 5205 * - update transcoder timings 5206 * - DP transcoding bits 5207 * - transcoder 5208 */ 5209 static void ironlake_pch_enable(const struct intel_atomic_state *state, 5210 const struct intel_crtc_state *crtc_state) 5211 { 5212 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5213 struct drm_device *dev = crtc->base.dev; 5214 struct drm_i915_private *dev_priv = to_i915(dev); 5215 int pipe = crtc->pipe; 5216 u32 temp; 5217 5218 assert_pch_transcoder_disabled(dev_priv, pipe); 5219 5220 if (IS_IVYBRIDGE(dev_priv)) 5221 ivybridge_update_fdi_bc_bifurcation(crtc_state); 5222 5223 /* Write the TU size bits before fdi link training, so that error 5224 * detection works. */ 5225 I915_WRITE(FDI_RX_TUSIZE1(pipe), 5226 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 5227 5228 /* For PCH output, train the FDI link */ 5229 dev_priv->display.fdi_link_train(crtc, crtc_state); 5230 5231 /* We need to program the right clock selection before writing the pixel 5232 * multiplier into the DPLL. */ 5233 if (HAS_PCH_CPT(dev_priv)) { 5234 u32 sel; 5235 5236 temp = I915_READ(PCH_DPLL_SEL); 5237 temp |= TRANS_DPLL_ENABLE(pipe); 5238 sel = TRANS_DPLLB_SEL(pipe); 5239 if (crtc_state->shared_dpll == 5240 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 5241 temp |= sel; 5242 else 5243 temp &= ~sel; 5244 I915_WRITE(PCH_DPLL_SEL, temp); 5245 } 5246 5247 /* XXX: PCH PLLs can be enabled any time before we enable the PCH 5248 * transcoder, and we actually should do this to not upset any PCH 5249 * transcoder that already uses the clock when we share it. 5250 * 5251 * Note that enable_shared_dpll tries to do the right thing, but 5252 * get_shared_dpll unconditionally resets the pll - we need that to have 5253 * the right LVDS enable sequence.
*/ 5254 intel_enable_shared_dpll(crtc_state); 5255 5256 /* set transcoder timing, panel must allow it */ 5257 assert_panel_unlocked(dev_priv, pipe); 5258 ironlake_pch_transcoder_set_timings(crtc_state, pipe); 5259 5260 intel_fdi_normal_train(crtc); 5261 5262 /* For PCH DP, enable TRANS_DP_CTL */ 5263 if (HAS_PCH_CPT(dev_priv) && 5264 intel_crtc_has_dp_encoder(crtc_state)) { 5265 const struct drm_display_mode *adjusted_mode = 5266 &crtc_state->base.adjusted_mode; 5267 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 5268 i915_reg_t reg = TRANS_DP_CTL(pipe); 5269 enum port port; 5270 5271 temp = I915_READ(reg); 5272 temp &= ~(TRANS_DP_PORT_SEL_MASK | 5273 TRANS_DP_SYNC_MASK | 5274 TRANS_DP_BPC_MASK); 5275 temp |= TRANS_DP_OUTPUT_ENABLE; 5276 temp |= bpc << 9; /* same format but at 11:9 */ 5277 5278 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 5279 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 5280 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 5281 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 5282 5283 port = intel_get_crtc_new_encoder(state, crtc_state)->port; 5284 WARN_ON(port < PORT_B || port > PORT_D); 5285 temp |= TRANS_DP_PORT_SEL(port); 5286 5287 I915_WRITE(reg, temp); 5288 } 5289 5290 ironlake_enable_pch_transcoder(crtc_state); 5291 } 5292 5293 static void lpt_pch_enable(const struct intel_atomic_state *state, 5294 const struct intel_crtc_state *crtc_state) 5295 { 5296 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5297 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5298 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 5299 5300 assert_pch_transcoder_disabled(dev_priv, PIPE_A); 5301 5302 lpt_program_iclkip(crtc_state); 5303 5304 /* Set transcoder timing. */ 5305 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A); 5306 5307 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 5308 } 5309 5310 static void cpt_verify_modeset(struct drm_device *dev, int pipe) 5311 { 5312 struct drm_i915_private *dev_priv = to_i915(dev); 5313 i915_reg_t dslreg = PIPEDSL(pipe); 5314 u32 temp; 5315 5316 temp = I915_READ(dslreg); 5317 udelay(500); 5318 if (wait_for(I915_READ(dslreg) != temp, 5)) { 5319 if (wait_for(I915_READ(dslreg) != temp, 5)) 5320 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 5321 } 5322 } 5323 5324 /* 5325 * The hardware phase 0.0 refers to the center of the pixel. 5326 * We want to start from the top/left edge which is phase 5327 * -0.5. That matches how the hardware calculates the scaling 5328 * factors (from top-left of the first pixel to bottom-right 5329 * of the last pixel, as opposed to the pixel centers). 5330 * 5331 * For 4:2:0 subsampled chroma planes we obviously have to 5332 * adjust that so that the chroma sample position lands in 5333 * the right spot. 5334 * 5335 * Note that for packed YCbCr 4:2:2 formats there is no way to 5336 * control chroma siting. The hardware simply replicates the 5337 * chroma samples for both of the luma samples, and thus we don't 5338 * actually get the expected MPEG2 chroma siting convention :( 5339 * The same behaviour is observed on pre-SKL platforms as well. 
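 *
 * As a concrete instance of the formula implemented below (cf. the
 * 1:4 upscaling diagram further down): sub = 1, no cositing and
 * scale = 0x4000 (0.25 in .16 fixed point) give
 * phase = -0x8000 + 0x4000 / (2 * 1) = -0x6000, which is the -0.375
 * initial phase shown in that diagram.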
5340 * 5341 * Theory behind the formula (note that we ignore sub-pixel 5342 * source coordinates): 5343 * s = source sample position 5344 * d = destination sample position 5345 * 5346 * Downscaling 4:1: 5347 * -0.5 5348 * | 0.0 5349 * | | 1.5 (initial phase) 5350 * | | | 5351 * v v v 5352 * | s | s | s | s | 5353 * | d | 5354 * 5355 * Upscaling 1:4: 5356 * -0.5 5357 * | -0.375 (initial phase) 5358 * | | 0.0 5359 * | | | 5360 * v v v 5361 * | s | 5362 * | d | d | d | d | 5363 */ 5364 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited) 5365 { 5366 int phase = -0x8000; 5367 u16 trip = 0; 5368 5369 if (chroma_cosited) 5370 phase += (sub - 1) * 0x8000 / sub; 5371 5372 phase += scale / (2 * sub); 5373 5374 /* 5375 * Hardware initial phase limited to [-0.5:1.5]. 5376 * Since the max hardware scale factor is 3.0, we 5377 * should never actually exceed 1.0 here. 5378 */ 5379 WARN_ON(phase < -0x8000 || phase > 0x18000); 5380 5381 if (phase < 0) 5382 phase = 0x10000 + phase; 5383 else 5384 trip = PS_PHASE_TRIP; 5385 5386 return ((phase >> 2) & PS_PHASE_MASK) | trip; 5387 } 5388 5389 #define SKL_MIN_SRC_W 8 5390 #define SKL_MAX_SRC_W 4096 5391 #define SKL_MIN_SRC_H 8 5392 #define SKL_MAX_SRC_H 4096 5393 #define SKL_MIN_DST_W 8 5394 #define SKL_MAX_DST_W 4096 5395 #define SKL_MIN_DST_H 8 5396 #define SKL_MAX_DST_H 4096 5397 #define ICL_MAX_SRC_W 5120 5398 #define ICL_MAX_SRC_H 4096 5399 #define ICL_MAX_DST_W 5120 5400 #define ICL_MAX_DST_H 4096 5401 #define SKL_MIN_YUV_420_SRC_W 16 5402 #define SKL_MIN_YUV_420_SRC_H 16 5403 5404 static int 5405 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 5406 unsigned int scaler_user, int *scaler_id, 5407 int src_w, int src_h, int dst_w, int dst_h, 5408 const struct drm_format_info *format, bool need_scaler) 5409 { 5410 struct intel_crtc_scaler_state *scaler_state = 5411 &crtc_state->scaler_state; 5412 struct intel_crtc *intel_crtc = 5413 to_intel_crtc(crtc_state->base.crtc); 5414 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 5415 const struct drm_display_mode *adjusted_mode = 5416 &crtc_state->base.adjusted_mode; 5417 5418 /* 5419 * Src coordinates are already rotated by 270 degrees for 5420 * the 90/270 degree plane rotation cases (to match the 5421 * GTT mapping), hence no need to account for rotation here. 5422 */ 5423 if (src_w != dst_w || src_h != dst_h) 5424 need_scaler = true; 5425 5426 /* 5427 * Scaling/fitting not supported in IF-ID mode in GEN9+ 5428 * TODO: Interlace fetch mode doesn't support YUV420 planar formats. 5429 * Once NV12 is enabled, handle it here while allocating scaler 5430 * for NV12. 5431 */ 5432 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && 5433 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 5434 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); 5435 return -EINVAL; 5436 } 5437 5438 /* 5439 * if the plane is being disabled, the scaler is no longer required, or detach is forced 5440 * - free the scaler bound to this plane/crtc 5441 * - in order to do this, update crtc->scaler_usage 5442 * 5443 * Here scaler state in crtc_state is set free so that 5444 * scaler can be assigned to another user. Actual register 5445 * update to free the scaler is done in plane/panel-fit programming. 5446 * For this purpose crtc/plane_state->scaler_id isn't reset here.
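 * (skl_detach_scalers() is what eventually clears the hardware state of
 * scalers that are no longer in use.)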
5447 */ 5448 if (force_detach || !need_scaler) { 5449 if (*scaler_id >= 0) { 5450 scaler_state->scaler_users &= ~(1 << scaler_user); 5451 scaler_state->scalers[*scaler_id].in_use = 0; 5452 5453 DRM_DEBUG_KMS("scaler_user index %u.%u: " 5454 "Staged freeing scaler id %d scaler_users = 0x%x\n", 5455 intel_crtc->pipe, scaler_user, *scaler_id, 5456 scaler_state->scaler_users); 5457 *scaler_id = -1; 5458 } 5459 return 0; 5460 } 5461 5462 if (format && is_planar_yuv_format(format->format) && 5463 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { 5464 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); 5465 return -EINVAL; 5466 } 5467 5468 /* range checks */ 5469 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 5470 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 5471 (INTEL_GEN(dev_priv) >= 11 && 5472 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || 5473 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || 5474 (INTEL_GEN(dev_priv) < 11 && 5475 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 5476 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { 5477 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " 5478 "size is out of scaler range\n", 5479 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); 5480 return -EINVAL; 5481 } 5482 5483 /* mark this plane as a scaler user in crtc_state */ 5484 scaler_state->scaler_users |= (1 << scaler_user); 5485 DRM_DEBUG_KMS("scaler_user index %u.%u: " 5486 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", 5487 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, 5488 scaler_state->scaler_users); 5489 5490 return 0; 5491 } 5492 5493 /** 5494 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc. 5495 * 5496 * @state: crtc's scaler state 5497 * 5498 * Return 5499 * 0 - scaler_usage updated successfully 5500 * error - requested scaling cannot be supported or other error condition 5501 */ 5502 int skl_update_scaler_crtc(struct intel_crtc_state *state) 5503 { 5504 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 5505 bool need_scaler = false; 5506 5507 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 5508 need_scaler = true; 5509 5510 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 5511 &state->scaler_state.scaler_id, 5512 state->pipe_src_w, state->pipe_src_h, 5513 adjusted_mode->crtc_hdisplay, 5514 adjusted_mode->crtc_vdisplay, NULL, need_scaler); 5515 } 5516 5517 /** 5518 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 5519 * @crtc_state: crtc's scaler state 5520 * @plane_state: atomic plane state to update 5521 * 5522 * Return 5523 * 0 - scaler_usage updated successfully 5524 * error - requested scaling cannot be supported or other error condition 5525 */ 5526 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 5527 struct intel_plane_state *plane_state) 5528 { 5529 struct intel_plane *intel_plane = 5530 to_intel_plane(plane_state->base.plane); 5531 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 5532 struct drm_framebuffer *fb = plane_state->base.fb; 5533 int ret; 5534 bool force_detach = !fb || !plane_state->base.visible; 5535 bool need_scaler = false; 5536 5537 /* Pre-gen11 and SDR planes always need a scaler for planar formats. 
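 * (For planar formats such as NV12 the scaler performs the chroma
 * upsampling; only ICL+ HDR planes can handle that without one.)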
*/ 5538 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && 5539 fb && is_planar_yuv_format(fb->format->format)) 5540 need_scaler = true; 5541 5542 ret = skl_update_scaler(crtc_state, force_detach, 5543 drm_plane_index(&intel_plane->base), 5544 &plane_state->scaler_id, 5545 drm_rect_width(&plane_state->base.src) >> 16, 5546 drm_rect_height(&plane_state->base.src) >> 16, 5547 drm_rect_width(&plane_state->base.dst), 5548 drm_rect_height(&plane_state->base.dst), 5549 fb ? fb->format : NULL, need_scaler); 5550 5551 if (ret || plane_state->scaler_id < 0) 5552 return ret; 5553 5554 /* check colorkey */ 5555 if (plane_state->ckey.flags) { 5556 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed\n", 5557 intel_plane->base.base.id, 5558 intel_plane->base.name); 5559 return -EINVAL; 5560 } 5561 5562 /* Check src format */ 5563 switch (fb->format->format) { 5564 case DRM_FORMAT_RGB565: 5565 case DRM_FORMAT_XBGR8888: 5566 case DRM_FORMAT_XRGB8888: 5567 case DRM_FORMAT_ABGR8888: 5568 case DRM_FORMAT_ARGB8888: 5569 case DRM_FORMAT_XRGB2101010: 5570 case DRM_FORMAT_XBGR2101010: 5571 case DRM_FORMAT_XBGR16161616F: 5572 case DRM_FORMAT_ABGR16161616F: 5573 case DRM_FORMAT_XRGB16161616F: 5574 case DRM_FORMAT_ARGB16161616F: 5575 case DRM_FORMAT_YUYV: 5576 case DRM_FORMAT_YVYU: 5577 case DRM_FORMAT_UYVY: 5578 case DRM_FORMAT_VYUY: 5579 case DRM_FORMAT_NV12: 5580 case DRM_FORMAT_P010: 5581 case DRM_FORMAT_P012: 5582 case DRM_FORMAT_P016: 5583 case DRM_FORMAT_Y210: 5584 case DRM_FORMAT_Y212: 5585 case DRM_FORMAT_Y216: 5586 case DRM_FORMAT_XVYU2101010: 5587 case DRM_FORMAT_XVYU12_16161616: 5588 case DRM_FORMAT_XVYU16161616: 5589 break; 5590 default: 5591 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", 5592 intel_plane->base.base.id, intel_plane->base.name, 5593 fb->base.id, fb->format->format); 5594 return -EINVAL; 5595 } 5596 5597 return 0; 5598 } 5599 5600 static void skylake_scaler_disable(struct intel_crtc *crtc) 5601 { 5602 int i; 5603 5604 for (i = 0; i < crtc->num_scalers; i++) 5605 skl_detach_scaler(crtc, i); 5606 } 5607 5608 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) 5609 { 5610 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5611 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5612 enum pipe pipe = crtc->pipe; 5613 const struct intel_crtc_scaler_state *scaler_state = 5614 &crtc_state->scaler_state; 5615 5616 if (crtc_state->pch_pfit.enabled) { 5617 u16 uv_rgb_hphase, uv_rgb_vphase; 5618 int pfit_w, pfit_h, hscale, vscale; 5619 int id; 5620 5621 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0)) 5622 return; 5623 5624 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF; 5625 pfit_h = crtc_state->pch_pfit.size & 0xFFFF; 5626 5627 hscale = (crtc_state->pipe_src_w << 16) / pfit_w; 5628 vscale = (crtc_state->pipe_src_h << 16) / pfit_h; 5629 5630 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); 5631 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); 5632 5633 id = scaler_state->scaler_id; 5634 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 5635 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 5636 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id), 5637 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); 5638 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), 5639 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); 5640 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos); 5641 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size); 5642 } 5643 } 5644 5645 static void ironlake_pfit_enable(const struct
intel_crtc_state *crtc_state) 5646 { 5647 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5648 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5649 int pipe = crtc->pipe; 5650 5651 if (crtc_state->pch_pfit.enabled) { 5652 /* Force use of hard-coded filter coefficients 5653 * as some pre-programmed values are broken, 5654 * e.g. x201. 5655 */ 5656 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 5657 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 5658 PF_PIPE_SEL_IVB(pipe)); 5659 else 5660 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 5661 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos); 5662 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size); 5663 } 5664 } 5665 5666 void hsw_enable_ips(const struct intel_crtc_state *crtc_state) 5667 { 5668 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5669 struct drm_device *dev = crtc->base.dev; 5670 struct drm_i915_private *dev_priv = to_i915(dev); 5671 5672 if (!crtc_state->ips_enabled) 5673 return; 5674 5675 /* 5676 * We can only enable IPS after we enable a plane and wait for a vblank. 5677 * This function is called from post_plane_update, which is run after 5678 * a vblank wait. 5679 */ 5680 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); 5681 5682 if (IS_BROADWELL(dev_priv)) { 5683 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 5684 IPS_ENABLE | IPS_PCODE_CONTROL)); 5685 /* Quoting Art Runyan: "it's not safe to expect any particular 5686 * value in IPS_CTL bit 31 after enabling IPS through the 5687 * mailbox." Moreover, the mailbox may return a bogus state, 5688 * so we need to just enable it and continue on. 5689 */ 5690 } else { 5691 I915_WRITE(IPS_CTL, IPS_ENABLE); 5692 /* The bit only becomes 1 in the next vblank, so this wait here 5693 * is essentially intel_wait_for_vblank. If we don't have this 5694 * and don't wait for vblanks until the end of crtc_enable, then 5695 * the HW state readout code will complain that the expected 5696 * IPS_CTL value is not the one we read. */ 5697 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50)) 5698 DRM_ERROR("Timed out waiting for IPS enable\n"); 5699 } 5700 } 5701 5702 void hsw_disable_ips(const struct intel_crtc_state *crtc_state) 5703 { 5704 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5705 struct drm_device *dev = crtc->base.dev; 5706 struct drm_i915_private *dev_priv = to_i915(dev); 5707 5708 if (!crtc_state->ips_enabled) 5709 return; 5710 5711 if (IS_BROADWELL(dev_priv)) { 5712 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 5713 /* 5714 * Wait for PCODE to finish disabling IPS. The BSpec-specified 5715 * 42ms timeout value leads to occasional timeouts, so use 100ms 5716 * instead. 5717 */ 5718 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100)) 5719 DRM_ERROR("Timed out waiting for IPS disable\n"); 5720 } else { 5721 I915_WRITE(IPS_CTL, 0); 5722 POSTING_READ(IPS_CTL); 5723 } 5724 5725 /* We need to wait for a vblank before we can disable the plane. */ 5726 intel_wait_for_vblank(dev_priv, crtc->pipe); 5727 } 5728 5729 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 5730 { 5731 if (intel_crtc->overlay) { 5732 struct drm_device *dev = intel_crtc->base.dev; 5733 5734 mutex_lock(&dev->struct_mutex); 5735 (void) intel_overlay_switch_off(intel_crtc->overlay); 5736 mutex_unlock(&dev->struct_mutex); 5737 } 5738 5739 /* Let userspace switch the overlay on again.
In most cases userspace 5740 * has to recompute where to put it anyway. 5741 */ 5742 } 5743 5744 /** 5745 * intel_post_enable_primary - Perform operations after enabling primary plane 5746 * @crtc: the CRTC whose primary plane was just enabled 5747 * @new_crtc_state: the enabling state 5748 * 5749 * Performs potentially sleeping operations that must be done after the primary 5750 * plane is enabled, such as updating FBC and IPS. Note that this may be 5751 * called due to an explicit primary plane update, or due to an implicit 5752 * re-enable that is caused when a sprite plane is updated to no longer 5753 * completely hide the primary plane. 5754 */ 5755 static void 5756 intel_post_enable_primary(struct drm_crtc *crtc, 5757 const struct intel_crtc_state *new_crtc_state) 5758 { 5759 struct drm_device *dev = crtc->dev; 5760 struct drm_i915_private *dev_priv = to_i915(dev); 5761 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5762 int pipe = intel_crtc->pipe; 5763 5764 /* 5765 * Gen2 reports pipe underruns whenever all planes are disabled. 5766 * So don't enable underrun reporting before at least some planes 5767 * are enabled. 5768 * FIXME: Need to fix the logic to work when we turn off all planes 5769 * but leave the pipe running. 5770 */ 5771 if (IS_GEN(dev_priv, 2)) 5772 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5773 5774 /* Underruns don't always raise interrupts, so check manually. */ 5775 intel_check_cpu_fifo_underruns(dev_priv); 5776 intel_check_pch_fifo_underruns(dev_priv); 5777 } 5778 5779 /* FIXME get rid of this and use pre_plane_update */ 5780 static void 5781 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) 5782 { 5783 struct drm_device *dev = crtc->dev; 5784 struct drm_i915_private *dev_priv = to_i915(dev); 5785 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5786 int pipe = intel_crtc->pipe; 5787 5788 /* 5789 * Gen2 reports pipe underruns whenever all planes are disabled. 5790 * So disable underrun reporting before all the planes get disabled. 5791 */ 5792 if (IS_GEN(dev_priv, 2)) 5793 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5794 5795 hsw_disable_ips(to_intel_crtc_state(crtc->state)); 5796 5797 /* 5798 * Vblank time updates from the shadow to live plane control register 5799 * are blocked if the memory self-refresh mode is active at that 5800 * moment. So to make sure the plane gets truly disabled, disable 5801 * first the self-refresh mode. The self-refresh enable bit in turn 5802 * will be checked/applied by the HW only at the next frame start 5803 * event which is after the vblank start event, so we need to have a 5804 * wait-for-vblank between disabling the plane and the pipe. 5805 */ 5806 if (HAS_GMCH(dev_priv) && 5807 intel_set_memory_cxsr(dev_priv, false)) 5808 intel_wait_for_vblank(dev_priv, pipe); 5809 } 5810 5811 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state, 5812 const struct intel_crtc_state *new_crtc_state) 5813 { 5814 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 5815 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5816 5817 if (!old_crtc_state->ips_enabled) 5818 return false; 5819 5820 if (needs_modeset(new_crtc_state)) 5821 return true; 5822 5823 /* 5824 * Workaround: Do not read or write the pipe palette/gamma data while 5825 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 5826 * 5827 * Disable IPS before we program the LUT.
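* (The matching re-enable is staged by hsw_post_update_enable_ips() below, after the LUT has been programmed.)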
5828 */ 5829 if (IS_HASWELL(dev_priv) && 5830 (new_crtc_state->base.color_mgmt_changed || 5831 new_crtc_state->update_pipe) && 5832 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 5833 return true; 5834 5835 return !new_crtc_state->ips_enabled; 5836 } 5837 5838 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state, 5839 const struct intel_crtc_state *new_crtc_state) 5840 { 5841 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 5842 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5843 5844 if (!new_crtc_state->ips_enabled) 5845 return false; 5846 5847 if (needs_modeset(new_crtc_state)) 5848 return true; 5849 5850 /* 5851 * Workaround: Do not read or write the pipe palette/gamma data while 5852 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 5853 * 5854 * Re-enable IPS after the LUT has been programmed. 5855 */ 5856 if (IS_HASWELL(dev_priv) && 5857 (new_crtc_state->base.color_mgmt_changed || 5858 new_crtc_state->update_pipe) && 5859 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 5860 return true; 5861 5862 /* 5863 * We can't read out IPS on Broadwell, so assume the worst and 5864 * forcibly enable IPS on the first fastset. 5865 */ 5866 if (new_crtc_state->update_pipe && 5867 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED) 5868 return true; 5869 5870 return !old_crtc_state->ips_enabled; 5871 } 5872 5873 static bool needs_nv12_wa(struct drm_i915_private *dev_priv, 5874 const struct intel_crtc_state *crtc_state) 5875 { 5876 if (!crtc_state->nv12_planes) 5877 return false; 5878 5879 /* WA Display #0827: Gen9:all */ 5880 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) 5881 return true; 5882 5883 return false; 5884 } 5885 5886 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv, 5887 const struct intel_crtc_state *crtc_state) 5888 { 5889 /* Wa_2006604312:icl */ 5890 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv)) 5891 return true; 5892 5893 return false; 5894 } 5895 5896 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) 5897 { 5898 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 5899 struct drm_device *dev = crtc->base.dev; 5900 struct drm_i915_private *dev_priv = to_i915(dev); 5901 struct drm_atomic_state *state = old_crtc_state->base.state; 5902 struct intel_crtc_state *pipe_config = 5903 intel_atomic_get_new_crtc_state(to_intel_atomic_state(state), 5904 crtc); 5905 struct drm_plane *primary = crtc->base.primary; 5906 struct drm_plane_state *old_primary_state = 5907 drm_atomic_get_old_plane_state(state, primary); 5908 5909 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); 5910 5911 if (pipe_config->update_wm_post && pipe_config->base.active) 5912 intel_update_watermarks(crtc); 5913 5914 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config)) 5915 hsw_enable_ips(pipe_config); 5916 5917 if (old_primary_state) { 5918 struct drm_plane_state *new_primary_state = 5919 drm_atomic_get_new_plane_state(state, primary); 5920 5921 intel_fbc_post_update(crtc); 5922 5923 if (new_primary_state->visible && 5924 (needs_modeset(pipe_config) || 5925 !old_primary_state->visible)) 5926 intel_post_enable_primary(&crtc->base, pipe_config); 5927 } 5928 5929 if (needs_nv12_wa(dev_priv, old_crtc_state) && 5930 !needs_nv12_wa(dev_priv, pipe_config)) 5931 skl_wa_827(dev_priv, crtc->pipe, false); 5932 5933 if (needs_scalerclk_wa(dev_priv, old_crtc_state) && 5934
!needs_scalerclk_wa(dev_priv, pipe_config)) 5935 icl_wa_scalerclkgating(dev_priv, crtc->pipe, false); 5936 } 5937 5938 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, 5939 struct intel_crtc_state *pipe_config) 5940 { 5941 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 5942 struct drm_device *dev = crtc->base.dev; 5943 struct drm_i915_private *dev_priv = to_i915(dev); 5944 struct drm_atomic_state *state = old_crtc_state->base.state; 5945 struct drm_plane *primary = crtc->base.primary; 5946 struct drm_plane_state *old_primary_state = 5947 drm_atomic_get_old_plane_state(state, primary); 5948 bool modeset = needs_modeset(pipe_config); 5949 struct intel_atomic_state *intel_state = 5950 to_intel_atomic_state(state); 5951 5952 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config)) 5953 hsw_disable_ips(old_crtc_state); 5954 5955 if (old_primary_state) { 5956 struct intel_plane_state *new_primary_state = 5957 intel_atomic_get_new_plane_state(intel_state, 5958 to_intel_plane(primary)); 5959 5960 intel_fbc_pre_update(crtc, pipe_config, new_primary_state); 5961 /* 5962 * Gen2 reports pipe underruns whenever all planes are disabled. 5963 * So disable underrun reporting before all the planes get disabled. 5964 */ 5965 if (IS_GEN(dev_priv, 2) && old_primary_state->visible && 5966 (modeset || !new_primary_state->base.visible)) 5967 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5968 } 5969 5970 /* Display WA 827 */ 5971 if (!needs_nv12_wa(dev_priv, old_crtc_state) && 5972 needs_nv12_wa(dev_priv, pipe_config)) 5973 skl_wa_827(dev_priv, crtc->pipe, true); 5974 5975 /* Wa_2006604312:icl */ 5976 if (!needs_scalerclk_wa(dev_priv, old_crtc_state) && 5977 needs_scalerclk_wa(dev_priv, pipe_config)) 5978 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true); 5979 5980 /* 5981 * Vblank time updates from the shadow to live plane control register 5982 * are blocked if the memory self-refresh mode is active at that 5983 * moment. So to make sure the plane gets truly disabled, disable 5984 * first the self-refresh mode. The self-refresh enable bit in turn 5985 * will be checked/applied by the HW only at the next frame start 5986 * event which is after the vblank start event, so we need to have a 5987 * wait-for-vblank between disabling the plane and the pipe. 5988 */ 5989 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active && 5990 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 5991 intel_wait_for_vblank(dev_priv, crtc->pipe); 5992 5993 /* 5994 * IVB workaround: must disable low power watermarks for at least 5995 * one frame before enabling scaling. LP watermarks can be re-enabled 5996 * when scaling is disabled. 5997 * 5998 * WaCxSRDisabledForSpriteScaling:ivb 5999 */ 6000 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) && 6001 old_crtc_state->base.active) 6002 intel_wait_for_vblank(dev_priv, crtc->pipe); 6003 6004 /* 6005 * If we're doing a modeset, we're done. No need to do any pre-vblank 6006 * watermark programming here. 6007 */ 6008 if (needs_modeset(pipe_config)) 6009 return; 6010 6011 /* 6012 * For platforms that support atomic watermarks, program the 6013 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 6014 * will be the intermediate values that are safe for both pre- and 6015 * post- vblank; when vblank happens, the 'active' values will be set 6016 * to the final 'target' values and we'll do this again to get the 6017 * optimal watermarks. 
For gen9+ platforms, the values we program here 6018 * will be the final target values which will get automatically latched 6019 * at vblank time; no further programming will be necessary. 6020 * 6021 * If a platform hasn't been transitioned to atomic watermarks yet, 6022 * we'll continue to update watermarks the old way, if flags tell 6023 * us to. 6024 */ 6025 if (dev_priv->display.initial_watermarks != NULL) 6026 dev_priv->display.initial_watermarks(intel_state, 6027 pipe_config); 6028 else if (pipe_config->update_wm_pre) 6029 intel_update_watermarks(crtc); 6030 } 6031 6032 static void intel_crtc_disable_planes(struct intel_atomic_state *state, 6033 struct intel_crtc *crtc) 6034 { 6035 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6036 const struct intel_crtc_state *new_crtc_state = 6037 intel_atomic_get_new_crtc_state(state, crtc); 6038 unsigned int update_mask = new_crtc_state->update_planes; 6039 const struct intel_plane_state *old_plane_state; 6040 struct intel_plane *plane; 6041 unsigned fb_bits = 0; 6042 int i; 6043 6044 intel_crtc_dpms_overlay_disable(crtc); 6045 6046 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 6047 if (crtc->pipe != plane->pipe || 6048 !(update_mask & BIT(plane->id))) 6049 continue; 6050 6051 intel_disable_plane(plane, new_crtc_state); 6052 6053 if (old_plane_state->base.visible) 6054 fb_bits |= plane->frontbuffer_bit; 6055 } 6056 6057 intel_frontbuffer_flip(dev_priv, fb_bits); 6058 } 6059 6060 /* 6061 * intel_connector_primary_encoder - get the primary encoder for a connector 6062 * @connector: connector for which to return the encoder 6063 * 6064 * Returns the primary encoder for a connector. There is a 1:1 mapping from 6065 * all connectors to their encoder, except for DP-MST connectors which have 6066 * both a virtual and a primary encoder. These DP-MST primary encoders can be 6067 * pointed to by as many DP-MST connectors as there are pipes. 6068 */ 6069 static struct intel_encoder * 6070 intel_connector_primary_encoder(struct intel_connector *connector) 6071 { 6072 struct intel_encoder *encoder; 6073 6074 if (connector->mst_port) 6075 return &dp_to_dig_port(connector->mst_port)->base; 6076 6077 encoder = intel_attached_encoder(&connector->base); 6078 WARN_ON(!encoder); 6079 6080 return encoder; 6081 } 6082 6083 static bool 6084 intel_connector_needs_modeset(struct intel_atomic_state *state, 6085 const struct drm_connector_state *old_conn_state, 6086 const struct drm_connector_state *new_conn_state) 6087 { 6088 struct intel_crtc *old_crtc = old_conn_state->crtc ? 6089 to_intel_crtc(old_conn_state->crtc) : NULL; 6090 struct intel_crtc *new_crtc = new_conn_state->crtc ? 
6091 to_intel_crtc(new_conn_state->crtc) : NULL; 6092 6093 return new_crtc != old_crtc || 6094 (new_crtc && 6095 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc))); 6096 } 6097 6098 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 6099 { 6100 struct drm_connector_state *old_conn_state; 6101 struct drm_connector_state *new_conn_state; 6102 struct drm_connector *conn; 6103 int i; 6104 6105 for_each_oldnew_connector_in_state(&state->base, conn, 6106 old_conn_state, new_conn_state, i) { 6107 struct intel_encoder *encoder; 6108 struct intel_crtc *crtc; 6109 6110 if (!intel_connector_needs_modeset(state, 6111 old_conn_state, 6112 new_conn_state)) 6113 continue; 6114 6115 encoder = intel_connector_primary_encoder(to_intel_connector(conn)); 6116 if (!encoder->update_prepare) 6117 continue; 6118 6119 crtc = new_conn_state->crtc ? 6120 to_intel_crtc(new_conn_state->crtc) : NULL; 6121 encoder->update_prepare(state, encoder, crtc); 6122 } 6123 } 6124 6125 static void intel_encoders_update_complete(struct intel_atomic_state *state) 6126 { 6127 struct drm_connector_state *old_conn_state; 6128 struct drm_connector_state *new_conn_state; 6129 struct drm_connector *conn; 6130 int i; 6131 6132 for_each_oldnew_connector_in_state(&state->base, conn, 6133 old_conn_state, new_conn_state, i) { 6134 struct intel_encoder *encoder; 6135 struct intel_crtc *crtc; 6136 6137 if (!intel_connector_needs_modeset(state, 6138 old_conn_state, 6139 new_conn_state)) 6140 continue; 6141 6142 encoder = intel_connector_primary_encoder(to_intel_connector(conn)); 6143 if (!encoder->update_complete) 6144 continue; 6145 6146 crtc = new_conn_state->crtc ? 6147 to_intel_crtc(new_conn_state->crtc) : NULL; 6148 encoder->update_complete(state, encoder, crtc); 6149 } 6150 } 6151 6152 static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc, 6153 struct intel_crtc_state *crtc_state, 6154 struct intel_atomic_state *state) 6155 { 6156 struct drm_connector_state *conn_state; 6157 struct drm_connector *conn; 6158 int i; 6159 6160 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6161 struct intel_encoder *encoder = 6162 to_intel_encoder(conn_state->best_encoder); 6163 6164 if (conn_state->crtc != &crtc->base) 6165 continue; 6166 6167 if (encoder->pre_pll_enable) 6168 encoder->pre_pll_enable(encoder, crtc_state, conn_state); 6169 } 6170 } 6171 6172 static void intel_encoders_pre_enable(struct intel_crtc *crtc, 6173 struct intel_crtc_state *crtc_state, 6174 struct intel_atomic_state *state) 6175 { 6176 struct drm_connector_state *conn_state; 6177 struct drm_connector *conn; 6178 int i; 6179 6180 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6181 struct intel_encoder *encoder = 6182 to_intel_encoder(conn_state->best_encoder); 6183 6184 if (conn_state->crtc != &crtc->base) 6185 continue; 6186 6187 if (encoder->pre_enable) 6188 encoder->pre_enable(encoder, crtc_state, conn_state); 6189 } 6190 } 6191 6192 static void intel_encoders_enable(struct intel_crtc *crtc, 6193 struct intel_crtc_state *crtc_state, 6194 struct intel_atomic_state *state) 6195 { 6196 struct drm_connector_state *conn_state; 6197 struct drm_connector *conn; 6198 int i; 6199 6200 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6201 struct intel_encoder *encoder = 6202 to_intel_encoder(conn_state->best_encoder); 6203 6204 if (conn_state->crtc != &crtc->base) 6205 continue; 6206 6207 if (encoder->enable) 6208 encoder->enable(encoder, crtc_state, conn_state); 6209 
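/* Let the ACPI OpRegion know this encoder is now active, so platform firmware can apply its display policies. */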
intel_opregion_notify_encoder(encoder, true); 6210 } 6211 } 6212 6213 static void intel_encoders_disable(struct intel_crtc *crtc, 6214 struct intel_crtc_state *old_crtc_state, 6215 struct intel_atomic_state *state) 6216 { 6217 struct drm_connector_state *old_conn_state; 6218 struct drm_connector *conn; 6219 int i; 6220 6221 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6222 struct intel_encoder *encoder = 6223 to_intel_encoder(old_conn_state->best_encoder); 6224 6225 if (old_conn_state->crtc != &crtc->base) 6226 continue; 6227 6228 intel_opregion_notify_encoder(encoder, false); 6229 if (encoder->disable) 6230 encoder->disable(encoder, old_crtc_state, old_conn_state); 6231 } 6232 } 6233 6234 static void intel_encoders_post_disable(struct intel_crtc *crtc, 6235 struct intel_crtc_state *old_crtc_state, 6236 struct intel_atomic_state *state) 6237 { 6238 struct drm_connector_state *old_conn_state; 6239 struct drm_connector *conn; 6240 int i; 6241 6242 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6243 struct intel_encoder *encoder = 6244 to_intel_encoder(old_conn_state->best_encoder); 6245 6246 if (old_conn_state->crtc != &crtc->base) 6247 continue; 6248 6249 if (encoder->post_disable) 6250 encoder->post_disable(encoder, old_crtc_state, old_conn_state); 6251 } 6252 } 6253 6254 static void intel_encoders_post_pll_disable(struct intel_crtc *crtc, 6255 struct intel_crtc_state *old_crtc_state, 6256 struct intel_atomic_state *state) 6257 { 6258 struct drm_connector_state *old_conn_state; 6259 struct drm_connector *conn; 6260 int i; 6261 6262 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6263 struct intel_encoder *encoder = 6264 to_intel_encoder(old_conn_state->best_encoder); 6265 6266 if (old_conn_state->crtc != &crtc->base) 6267 continue; 6268 6269 if (encoder->post_pll_disable) 6270 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state); 6271 } 6272 } 6273 6274 static void intel_encoders_update_pipe(struct intel_crtc *crtc, 6275 struct intel_crtc_state *crtc_state, 6276 struct intel_atomic_state *state) 6277 { 6278 struct drm_connector_state *conn_state; 6279 struct drm_connector *conn; 6280 int i; 6281 6282 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6283 struct intel_encoder *encoder = 6284 to_intel_encoder(conn_state->best_encoder); 6285 6286 if (conn_state->crtc != &crtc->base) 6287 continue; 6288 6289 if (encoder->update_pipe) 6290 encoder->update_pipe(encoder, crtc_state, conn_state); 6291 } 6292 } 6293 6294 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) 6295 { 6296 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6297 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 6298 6299 plane->disable_plane(plane, crtc_state); 6300 } 6301 6302 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, 6303 struct intel_atomic_state *state) 6304 { 6305 struct drm_crtc *crtc = pipe_config->base.crtc; 6306 struct drm_device *dev = crtc->dev; 6307 struct drm_i915_private *dev_priv = to_i915(dev); 6308 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6309 int pipe = intel_crtc->pipe; 6310 6311 if (WARN_ON(intel_crtc->active)) 6312 return; 6313 6314 /* 6315 * Sometimes spurious CPU pipe underruns happen during FDI 6316 * training, at least with VGA+HDMI cloning. Suppress them. 6317 * 6318 * On ILK we get occasional spurious CPU pipe underruns 6319 * between eDP port A enable and vdd enable.
Also PCH port 6320 * enable seems to result in the occasional CPU pipe underrun. 6321 * 6322 * Spurious PCH underruns also occur during PCH enabling. 6323 */ 6324 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6325 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6326 6327 if (pipe_config->has_pch_encoder) 6328 intel_prepare_shared_dpll(pipe_config); 6329 6330 if (intel_crtc_has_dp_encoder(pipe_config)) 6331 intel_dp_set_m_n(pipe_config, M1_N1); 6332 6333 intel_set_pipe_timings(pipe_config); 6334 intel_set_pipe_src_size(pipe_config); 6335 6336 if (pipe_config->has_pch_encoder) { 6337 intel_cpu_transcoder_set_m_n(pipe_config, 6338 &pipe_config->fdi_m_n, NULL); 6339 } 6340 6341 ironlake_set_pipeconf(pipe_config); 6342 6343 intel_crtc->active = true; 6344 6345 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6346 6347 if (pipe_config->has_pch_encoder) { 6348 /* Note: FDI PLL enabling _must_ be done before we enable the 6349 * cpu pipes, hence this is separate from all the other fdi/pch 6350 * enabling. */ 6351 ironlake_fdi_pll_enable(pipe_config); 6352 } else { 6353 assert_fdi_tx_disabled(dev_priv, pipe); 6354 assert_fdi_rx_disabled(dev_priv, pipe); 6355 } 6356 6357 ironlake_pfit_enable(pipe_config); 6358 6359 /* 6360 * On ILK+ LUT must be loaded before the pipe is running but with 6361 * clocks enabled 6362 */ 6363 intel_color_load_luts(pipe_config); 6364 intel_color_commit(pipe_config); 6365 /* update DSPCNTR to configure gamma for pipe bottom color */ 6366 intel_disable_primary_plane(pipe_config); 6367 6368 if (dev_priv->display.initial_watermarks != NULL) 6369 dev_priv->display.initial_watermarks(state, pipe_config); 6370 intel_enable_pipe(pipe_config); 6371 6372 if (pipe_config->has_pch_encoder) 6373 ironlake_pch_enable(state, pipe_config); 6374 6375 assert_vblank_disabled(crtc); 6376 intel_crtc_vblank_on(pipe_config); 6377 6378 intel_encoders_enable(intel_crtc, pipe_config, state); 6379 6380 if (HAS_PCH_CPT(dev_priv)) 6381 cpt_verify_modeset(dev, intel_crtc->pipe); 6382 6383 /* 6384 * Must wait for vblank to avoid spurious PCH FIFO underruns. 6385 * And a second vblank wait is needed at least on ILK with 6386 * some interlaced HDMI modes. Let's do the double wait always 6387 * in case there are more corner cases we don't know about. 6388 */ 6389 if (pipe_config->has_pch_encoder) { 6390 intel_wait_for_vblank(dev_priv, pipe); 6391 intel_wait_for_vblank(dev_priv, pipe); 6392 } 6393 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6394 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6395 } 6396 6397 /* IPS only exists on ULT machines and is tied to pipe A. 
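* (HAS_IPS() covers HSW ULT parts and Broadwell; the pipe A restriction is checked below.)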
*/ 6398 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 6399 { 6400 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; 6401 } 6402 6403 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, 6404 enum pipe pipe, bool apply) 6405 { 6406 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe)); 6407 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 6408 6409 if (apply) 6410 val |= mask; 6411 else 6412 val &= ~mask; 6413 6414 I915_WRITE(CLKGATE_DIS_PSL(pipe), val); 6415 } 6416 6417 static void icl_pipe_mbus_enable(struct intel_crtc *crtc) 6418 { 6419 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6420 enum pipe pipe = crtc->pipe; 6421 u32 val; 6422 6423 val = MBUS_DBOX_A_CREDIT(2); 6424 6425 if (INTEL_GEN(dev_priv) >= 12) { 6426 val |= MBUS_DBOX_BW_CREDIT(2); 6427 val |= MBUS_DBOX_B_CREDIT(12); 6428 } else { 6429 val |= MBUS_DBOX_BW_CREDIT(1); 6430 val |= MBUS_DBOX_B_CREDIT(8); 6431 } 6432 6433 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); 6434 } 6435 6436 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, 6437 struct intel_atomic_state *state) 6438 { 6439 struct drm_crtc *crtc = pipe_config->base.crtc; 6440 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 6441 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6442 int pipe = intel_crtc->pipe, hsw_workaround_pipe; 6443 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 6444 bool psl_clkgate_wa; 6445 6446 if (WARN_ON(intel_crtc->active)) 6447 return; 6448 6449 intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); 6450 6451 if (pipe_config->shared_dpll) 6452 intel_enable_shared_dpll(pipe_config); 6453 6454 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6455 6456 if (intel_crtc_has_dp_encoder(pipe_config)) 6457 intel_dp_set_m_n(pipe_config, M1_N1); 6458 6459 if (!transcoder_is_dsi(cpu_transcoder)) 6460 intel_set_pipe_timings(pipe_config); 6461 6462 intel_set_pipe_src_size(pipe_config); 6463 6464 if (cpu_transcoder != TRANSCODER_EDP && 6465 !transcoder_is_dsi(cpu_transcoder)) { 6466 I915_WRITE(PIPE_MULT(cpu_transcoder), 6467 pipe_config->pixel_multiplier - 1); 6468 } 6469 6470 if (pipe_config->has_pch_encoder) { 6471 intel_cpu_transcoder_set_m_n(pipe_config, 6472 &pipe_config->fdi_m_n, NULL); 6473 } 6474 6475 if (!transcoder_is_dsi(cpu_transcoder)) 6476 haswell_set_pipeconf(pipe_config); 6477 6478 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 6479 bdw_set_pipemisc(pipe_config); 6480 6481 intel_crtc->active = true; 6482 6483 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ 6484 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 6485 pipe_config->pch_pfit.enabled; 6486 if (psl_clkgate_wa) 6487 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 6488 6489 if (INTEL_GEN(dev_priv) >= 9) 6490 skylake_pfit_enable(pipe_config); 6491 else 6492 ironlake_pfit_enable(pipe_config); 6493 6494 /* 6495 * On ILK+ LUT must be loaded before the pipe is running but with 6496 * clocks enabled 6497 */ 6498 intel_color_load_luts(pipe_config); 6499 intel_color_commit(pipe_config); 6500 /* update DSPCNTR to configure gamma/csc for pipe bottom color */ 6501 if (INTEL_GEN(dev_priv) < 9) 6502 intel_disable_primary_plane(pipe_config); 6503 6504 if (INTEL_GEN(dev_priv) >= 11) 6505 icl_set_pipe_chicken(intel_crtc); 6506 6507 intel_ddi_set_pipe_settings(pipe_config); 6508 if (!transcoder_is_dsi(cpu_transcoder)) 6509 intel_ddi_enable_transcoder_func(pipe_config); 6510 6511 if 
(dev_priv->display.initial_watermarks != NULL) 6512 dev_priv->display.initial_watermarks(state, pipe_config); 6513 6514 if (INTEL_GEN(dev_priv) >= 11) 6515 icl_pipe_mbus_enable(intel_crtc); 6516 6517 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 6518 if (!transcoder_is_dsi(cpu_transcoder)) 6519 intel_enable_pipe(pipe_config); 6520 6521 if (pipe_config->has_pch_encoder) 6522 lpt_pch_enable(state, pipe_config); 6523 6524 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) 6525 intel_ddi_set_vc_payload_alloc(pipe_config, true); 6526 6527 assert_vblank_disabled(crtc); 6528 intel_crtc_vblank_on(pipe_config); 6529 6530 intel_encoders_enable(intel_crtc, pipe_config, state); 6531 6532 if (psl_clkgate_wa) { 6533 intel_wait_for_vblank(dev_priv, pipe); 6534 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); 6535 } 6536 6537 /* If we change the relative order between pipe/planes enabling, we need 6538 * to change the workaround. */ 6539 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; 6540 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 6541 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 6542 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 6543 } 6544 } 6545 6546 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) 6547 { 6548 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 6549 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6550 enum pipe pipe = crtc->pipe; 6551 6552 /* To avoid upsetting the power well on Haswell, only disable the pfit 6553 * if it's in use. The hw state code will make sure we get this right. */ 6554 if (old_crtc_state->pch_pfit.enabled) { 6555 I915_WRITE(PF_CTL(pipe), 0); 6556 I915_WRITE(PF_WIN_POS(pipe), 0); 6557 I915_WRITE(PF_WIN_SZ(pipe), 0); 6558 } 6559 } 6560 6561 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, 6562 struct intel_atomic_state *state) 6563 { 6564 struct drm_crtc *crtc = old_crtc_state->base.crtc; 6565 struct drm_device *dev = crtc->dev; 6566 struct drm_i915_private *dev_priv = to_i915(dev); 6567 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6568 int pipe = intel_crtc->pipe; 6569 6570 /* 6571 * Sometimes spurious CPU pipe underruns happen when the 6572 * pipe is already disabled, but FDI RX/TX is still enabled. 6573 * Happens at least with VGA+HDMI cloning. Suppress them.
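* (Both reporters are re-enabled at the end of this function, once the pipe, FDI and the PCH transcoder have been shut down.)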
6574 */ 6575 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6576 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6577 6578 intel_encoders_disable(intel_crtc, old_crtc_state, state); 6579 6580 drm_crtc_vblank_off(crtc); 6581 assert_vblank_disabled(crtc); 6582 6583 intel_disable_pipe(old_crtc_state); 6584 6585 ironlake_pfit_disable(old_crtc_state); 6586 6587 if (old_crtc_state->has_pch_encoder) 6588 ironlake_fdi_disable(crtc); 6589 6590 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 6591 6592 if (old_crtc_state->has_pch_encoder) { 6593 ironlake_disable_pch_transcoder(dev_priv, pipe); 6594 6595 if (HAS_PCH_CPT(dev_priv)) { 6596 i915_reg_t reg; 6597 u32 temp; 6598 6599 /* disable TRANS_DP_CTL */ 6600 reg = TRANS_DP_CTL(pipe); 6601 temp = I915_READ(reg); 6602 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 6603 TRANS_DP_PORT_SEL_MASK); 6604 temp |= TRANS_DP_PORT_SEL_NONE; 6605 I915_WRITE(reg, temp); 6606 6607 /* disable DPLL_SEL */ 6608 temp = I915_READ(PCH_DPLL_SEL); 6609 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 6610 I915_WRITE(PCH_DPLL_SEL, temp); 6611 } 6612 6613 ironlake_fdi_pll_disable(intel_crtc); 6614 } 6615 6616 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6617 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6618 } 6619 6620 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, 6621 struct intel_atomic_state *state) 6622 { 6623 struct drm_crtc *crtc = old_crtc_state->base.crtc; 6624 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 6625 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6626 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 6627 6628 intel_encoders_disable(intel_crtc, old_crtc_state, state); 6629 6630 drm_crtc_vblank_off(crtc); 6631 assert_vblank_disabled(crtc); 6632 6633 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 6634 if (!transcoder_is_dsi(cpu_transcoder)) 6635 intel_disable_pipe(old_crtc_state); 6636 6637 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) 6638 intel_ddi_set_vc_payload_alloc(old_crtc_state, false); 6639 6640 if (!transcoder_is_dsi(cpu_transcoder)) 6641 intel_ddi_disable_transcoder_func(old_crtc_state); 6642 6643 intel_dsc_disable(old_crtc_state); 6644 6645 if (INTEL_GEN(dev_priv) >= 9) 6646 skylake_scaler_disable(intel_crtc); 6647 else 6648 ironlake_pfit_disable(old_crtc_state); 6649 6650 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 6651 6652 intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); 6653 } 6654 6655 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 6656 { 6657 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6658 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6659 6660 if (!crtc_state->gmch_pfit.control) 6661 return; 6662 6663 /* 6664 * The panel fitter should only be adjusted whilst the pipe is disabled, 6665 * according to register description and PRM. 6666 */ 6667 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 6668 assert_pipe_disabled(dev_priv, crtc->pipe); 6669 6670 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios); 6671 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control); 6672 6673 /* Border color in case we don't scale up to the full screen. Black by 6674 * default, change to something else for debugging. 
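* (BCLRPAT is the pipe border color pattern register; writing 0 selects black.)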
*/ 6675 I915_WRITE(BCLRPAT(crtc->pipe), 0); 6676 } 6677 6678 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 6679 { 6680 if (phy == PHY_NONE) 6681 return false; 6682 6683 if (IS_ELKHARTLAKE(dev_priv)) 6684 return phy <= PHY_C; 6685 6686 if (INTEL_GEN(dev_priv) >= 11) 6687 return phy <= PHY_B; 6688 6689 return false; 6690 } 6691 6692 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 6693 { 6694 if (INTEL_GEN(dev_priv) >= 12) 6695 return phy >= PHY_D && phy <= PHY_I; 6696 6697 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) 6698 return phy >= PHY_C && phy <= PHY_F; 6699 6700 return false; 6701 } 6702 6703 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 6704 { 6705 if (IS_ELKHARTLAKE(i915) && port == PORT_D) 6706 return PHY_A; 6707 6708 return (enum phy)port; 6709 } 6710 6711 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 6712 { 6713 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 6714 return PORT_TC_NONE; 6715 6716 if (INTEL_GEN(dev_priv) >= 12) 6717 return port - PORT_D; 6718 6719 return port - PORT_C; 6720 } 6721 6722 enum intel_display_power_domain intel_port_to_power_domain(enum port port) 6723 { 6724 switch (port) { 6725 case PORT_A: 6726 return POWER_DOMAIN_PORT_DDI_A_LANES; 6727 case PORT_B: 6728 return POWER_DOMAIN_PORT_DDI_B_LANES; 6729 case PORT_C: 6730 return POWER_DOMAIN_PORT_DDI_C_LANES; 6731 case PORT_D: 6732 return POWER_DOMAIN_PORT_DDI_D_LANES; 6733 case PORT_E: 6734 return POWER_DOMAIN_PORT_DDI_E_LANES; 6735 case PORT_F: 6736 return POWER_DOMAIN_PORT_DDI_F_LANES; 6737 default: 6738 MISSING_CASE(port); 6739 return POWER_DOMAIN_PORT_OTHER; 6740 } 6741 } 6742 6743 enum intel_display_power_domain 6744 intel_aux_power_domain(struct intel_digital_port *dig_port) 6745 { 6746 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 6747 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 6748 6749 if (intel_phy_is_tc(dev_priv, phy) && 6750 dig_port->tc_mode == TC_PORT_TBT_ALT) { 6751 switch (dig_port->aux_ch) { 6752 case AUX_CH_C: 6753 return POWER_DOMAIN_AUX_TBT1; 6754 case AUX_CH_D: 6755 return POWER_DOMAIN_AUX_TBT2; 6756 case AUX_CH_E: 6757 return POWER_DOMAIN_AUX_TBT3; 6758 case AUX_CH_F: 6759 return POWER_DOMAIN_AUX_TBT4; 6760 default: 6761 MISSING_CASE(dig_port->aux_ch); 6762 return POWER_DOMAIN_AUX_TBT1; 6763 } 6764 } 6765 6766 switch (dig_port->aux_ch) { 6767 case AUX_CH_A: 6768 return POWER_DOMAIN_AUX_A; 6769 case AUX_CH_B: 6770 return POWER_DOMAIN_AUX_B; 6771 case AUX_CH_C: 6772 return POWER_DOMAIN_AUX_C; 6773 case AUX_CH_D: 6774 return POWER_DOMAIN_AUX_D; 6775 case AUX_CH_E: 6776 return POWER_DOMAIN_AUX_E; 6777 case AUX_CH_F: 6778 return POWER_DOMAIN_AUX_F; 6779 default: 6780 MISSING_CASE(dig_port->aux_ch); 6781 return POWER_DOMAIN_AUX_A; 6782 } 6783 } 6784 6785 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) 6786 { 6787 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6788 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6789 struct drm_encoder *encoder; 6790 enum pipe pipe = crtc->pipe; 6791 u64 mask; 6792 enum transcoder transcoder = crtc_state->cpu_transcoder; 6793 6794 if (!crtc_state->base.active) 6795 return 0; 6796 6797 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe)); 6798 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder)); 6799 if (crtc_state->pch_pfit.enabled || 6800 crtc_state->pch_pfit.force_thru) 6801 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 
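/* Each encoder driving this crtc adds its own power domain on top of the pipe/transcoder/panel-fitter domains collected above. */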
6802 6803 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 6804 crtc_state->base.encoder_mask) { 6805 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 6806 6807 mask |= BIT_ULL(intel_encoder->power_domain); 6808 } 6809 6810 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 6811 mask |= BIT_ULL(POWER_DOMAIN_AUDIO); 6812 6813 if (crtc_state->shared_dpll) 6814 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE); 6815 6816 return mask; 6817 } 6818 6819 static u64 6820 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) 6821 { 6822 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6823 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6824 enum intel_display_power_domain domain; 6825 u64 domains, new_domains, old_domains; 6826 6827 old_domains = crtc->enabled_power_domains; 6828 crtc->enabled_power_domains = new_domains = 6829 get_crtc_power_domains(crtc_state); 6830 6831 domains = new_domains & ~old_domains; 6832 6833 for_each_power_domain(domain, domains) 6834 intel_display_power_get(dev_priv, domain); 6835 6836 return old_domains & ~new_domains; 6837 } 6838 6839 static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 6840 u64 domains) 6841 { 6842 enum intel_display_power_domain domain; 6843 6844 for_each_power_domain(domain, domains) 6845 intel_display_power_put_unchecked(dev_priv, domain); 6846 } 6847 6848 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, 6849 struct intel_atomic_state *state) 6850 { 6851 struct drm_crtc *crtc = pipe_config->base.crtc; 6852 struct drm_device *dev = crtc->dev; 6853 struct drm_i915_private *dev_priv = to_i915(dev); 6854 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6855 int pipe = intel_crtc->pipe; 6856 6857 if (WARN_ON(intel_crtc->active)) 6858 return; 6859 6860 if (intel_crtc_has_dp_encoder(pipe_config)) 6861 intel_dp_set_m_n(pipe_config, M1_N1); 6862 6863 intel_set_pipe_timings(pipe_config); 6864 intel_set_pipe_src_size(pipe_config); 6865 6866 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 6867 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 6868 I915_WRITE(CHV_CANVAS(pipe), 0); 6869 } 6870 6871 i9xx_set_pipeconf(pipe_config); 6872 6873 intel_crtc->active = true; 6874 6875 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6876 6877 intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); 6878 6879 if (IS_CHERRYVIEW(dev_priv)) { 6880 chv_prepare_pll(intel_crtc, pipe_config); 6881 chv_enable_pll(intel_crtc, pipe_config); 6882 } else { 6883 vlv_prepare_pll(intel_crtc, pipe_config); 6884 vlv_enable_pll(intel_crtc, pipe_config); 6885 } 6886 6887 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6888 6889 i9xx_pfit_enable(pipe_config); 6890 6891 intel_color_load_luts(pipe_config); 6892 intel_color_commit(pipe_config); 6893 /* update DSPCNTR to configure gamma for pipe bottom color */ 6894 intel_disable_primary_plane(pipe_config); 6895 6896 dev_priv->display.initial_watermarks(state, pipe_config); 6897 intel_enable_pipe(pipe_config); 6898 6899 assert_vblank_disabled(crtc); 6900 intel_crtc_vblank_on(pipe_config); 6901 6902 intel_encoders_enable(intel_crtc, pipe_config, state); 6903 } 6904 6905 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) 6906 { 6907 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6908 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6909 6910 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0); 6911 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1); 
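/* FP0 programs the divisors for the normal dot clock; FP1 holds the reduced-clock divisors (typically identical to FP0 unless e.g. LVDS downclocking is in use). */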
6912 } 6913 6914 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, 6915 struct intel_atomic_state *state) 6916 { 6917 struct drm_crtc *crtc = pipe_config->base.crtc; 6918 struct drm_device *dev = crtc->dev; 6919 struct drm_i915_private *dev_priv = to_i915(dev); 6920 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6921 enum pipe pipe = intel_crtc->pipe; 6922 6923 if (WARN_ON(intel_crtc->active)) 6924 return; 6925 6926 i9xx_set_pll_dividers(pipe_config); 6927 6928 if (intel_crtc_has_dp_encoder(pipe_config)) 6929 intel_dp_set_m_n(pipe_config, M1_N1); 6930 6931 intel_set_pipe_timings(pipe_config); 6932 intel_set_pipe_src_size(pipe_config); 6933 6934 i9xx_set_pipeconf(pipe_config); 6935 6936 intel_crtc->active = true; 6937 6938 if (!IS_GEN(dev_priv, 2)) 6939 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6940 6941 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6942 6943 i9xx_enable_pll(intel_crtc, pipe_config); 6944 6945 i9xx_pfit_enable(pipe_config); 6946 6947 intel_color_load_luts(pipe_config); 6948 intel_color_commit(pipe_config); 6949 /* update DSPCNTR to configure gamma for pipe bottom color */ 6950 intel_disable_primary_plane(pipe_config); 6951 6952 if (dev_priv->display.initial_watermarks != NULL) 6953 dev_priv->display.initial_watermarks(state, 6954 pipe_config); 6955 else 6956 intel_update_watermarks(intel_crtc); 6957 intel_enable_pipe(pipe_config); 6958 6959 assert_vblank_disabled(crtc); 6960 intel_crtc_vblank_on(pipe_config); 6961 6962 intel_encoders_enable(intel_crtc, pipe_config, state); 6963 } 6964 6965 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 6966 { 6967 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 6968 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6969 6970 if (!old_crtc_state->gmch_pfit.control) 6971 return; 6972 6973 assert_pipe_disabled(dev_priv, crtc->pipe); 6974 6975 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n", 6976 I915_READ(PFIT_CONTROL)); 6977 I915_WRITE(PFIT_CONTROL, 0); 6978 } 6979 6980 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, 6981 struct intel_atomic_state *state) 6982 { 6983 struct drm_crtc *crtc = old_crtc_state->base.crtc; 6984 struct drm_device *dev = crtc->dev; 6985 struct drm_i915_private *dev_priv = to_i915(dev); 6986 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6987 int pipe = intel_crtc->pipe; 6988 6989 /* 6990 * On gen2 planes are double buffered but the pipe isn't, so we must 6991 * wait for planes to fully turn off before disabling the pipe. 
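* (The vblank wait below gives the double buffered plane registers a full frame to latch the disable.)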
6992 */ 6993 if (IS_GEN(dev_priv, 2)) 6994 intel_wait_for_vblank(dev_priv, pipe); 6995 6996 intel_encoders_disable(intel_crtc, old_crtc_state, state); 6997 6998 drm_crtc_vblank_off(crtc); 6999 assert_vblank_disabled(crtc); 7000 7001 intel_disable_pipe(old_crtc_state); 7002 7003 i9xx_pfit_disable(old_crtc_state); 7004 7005 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 7006 7007 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 7008 if (IS_CHERRYVIEW(dev_priv)) 7009 chv_disable_pll(dev_priv, pipe); 7010 else if (IS_VALLEYVIEW(dev_priv)) 7011 vlv_disable_pll(dev_priv, pipe); 7012 else 7013 i9xx_disable_pll(old_crtc_state); 7014 } 7015 7016 intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); 7017 7018 if (!IS_GEN(dev_priv, 2)) 7019 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 7020 7021 if (!dev_priv->display.initial_watermarks) 7022 intel_update_watermarks(intel_crtc); 7023 7024 /* clock the pipe down to 640x480@60 to potentially save power */ 7025 if (IS_I830(dev_priv)) 7026 i830_enable_pipe(dev_priv, pipe); 7027 } 7028 7029 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, 7030 struct drm_modeset_acquire_ctx *ctx) 7031 { 7032 struct intel_encoder *encoder; 7033 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7034 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 7035 struct intel_bw_state *bw_state = 7036 to_intel_bw_state(dev_priv->bw_obj.state); 7037 enum intel_display_power_domain domain; 7038 struct intel_plane *plane; 7039 u64 domains; 7040 struct drm_atomic_state *state; 7041 struct intel_crtc_state *crtc_state; 7042 int ret; 7043 7044 if (!intel_crtc->active) 7045 return; 7046 7047 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) { 7048 const struct intel_plane_state *plane_state = 7049 to_intel_plane_state(plane->base.state); 7050 7051 if (plane_state->base.visible) 7052 intel_plane_disable_noatomic(intel_crtc, plane); 7053 } 7054 7055 state = drm_atomic_state_alloc(crtc->dev); 7056 if (!state) { 7057 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory\n", 7058 crtc->base.id, crtc->name); 7059 return; 7060 } 7061 7062 state->acquire_ctx = ctx; 7063 7064 /* Everything's already locked, -EDEADLK can't happen.
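* (The caller's @ctx already holds every modeset lock we could need, so the atomic calls below cannot hit lock contention.)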
*/ 7065 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 7066 ret = drm_atomic_add_affected_connectors(state, crtc); 7067 7068 WARN_ON(IS_ERR(crtc_state) || ret); 7069 7070 dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state)); 7071 7072 drm_atomic_state_put(state); 7073 7074 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", 7075 crtc->base.id, crtc->name); 7076 7077 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); 7078 crtc->state->active = false; 7079 intel_crtc->active = false; 7080 crtc->enabled = false; 7081 crtc->state->connector_mask = 0; 7082 crtc->state->encoder_mask = 0; 7083 7084 for_each_encoder_on_crtc(crtc->dev, crtc, encoder) 7085 encoder->base.crtc = NULL; 7086 7087 intel_fbc_disable(intel_crtc); 7088 intel_update_watermarks(intel_crtc); 7089 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state)); 7090 7091 domains = intel_crtc->enabled_power_domains; 7092 for_each_power_domain(domain, domains) 7093 intel_display_power_put_unchecked(dev_priv, domain); 7094 intel_crtc->enabled_power_domains = 0; 7095 7096 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); 7097 dev_priv->min_cdclk[intel_crtc->pipe] = 0; 7098 dev_priv->min_voltage_level[intel_crtc->pipe] = 0; 7099 7100 bw_state->data_rate[intel_crtc->pipe] = 0; 7101 bw_state->num_active_planes[intel_crtc->pipe] = 0; 7102 } 7103 7104 /* 7105 * Turn all CRTCs off, but do not adjust state. 7106 * This has to be paired with a call to intel_modeset_setup_hw_state. 7107 */ 7108 int intel_display_suspend(struct drm_device *dev) 7109 { 7110 struct drm_i915_private *dev_priv = to_i915(dev); 7111 struct drm_atomic_state *state; 7112 int ret; 7113 7114 state = drm_atomic_helper_suspend(dev); 7115 ret = PTR_ERR_OR_ZERO(state); 7116 if (ret) 7117 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 7118 else 7119 dev_priv->modeset_restore_state = state; 7120 return ret; 7121 } 7122 7123 void intel_encoder_destroy(struct drm_encoder *encoder) 7124 { 7125 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 7126 7127 drm_encoder_cleanup(encoder); 7128 kfree(intel_encoder); 7129 } 7130 7131 /* Cross check the actual hw state with our own modeset state tracking (and its 7132 * internal consistency).
*/ 7133 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, 7134 struct drm_connector_state *conn_state) 7135 { 7136 struct intel_connector *connector = to_intel_connector(conn_state->connector); 7137 7138 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 7139 connector->base.base.id, 7140 connector->base.name); 7141 7142 if (connector->get_hw_state(connector)) { 7143 struct intel_encoder *encoder = connector->encoder; 7144 7145 I915_STATE_WARN(!crtc_state, 7146 "connector enabled without attached crtc\n"); 7147 7148 if (!crtc_state) 7149 return; 7150 7151 I915_STATE_WARN(!crtc_state->base.active, 7152 "connector is active, but attached crtc isn't\n"); 7153 7154 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 7155 return; 7156 7157 I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 7158 "atomic encoder doesn't match attached encoder\n"); 7159 7160 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 7161 "attached encoder crtc differs from connector crtc\n"); 7162 } else { 7163 I915_STATE_WARN(crtc_state && crtc_state->base.active, 7164 "attached crtc is active, but connector isn't\n"); 7165 I915_STATE_WARN(!crtc_state && conn_state->best_encoder, 7166 "best encoder set without crtc!\n"); 7167 } 7168 } 7169 7170 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 7171 { 7172 if (crtc_state->base.enable && crtc_state->has_pch_encoder) 7173 return crtc_state->fdi_lanes; 7174 7175 return 0; 7176 } 7177 7178 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, 7179 struct intel_crtc_state *pipe_config) 7180 { 7181 struct drm_i915_private *dev_priv = to_i915(dev); 7182 struct drm_atomic_state *state = pipe_config->base.state; 7183 struct intel_crtc *other_crtc; 7184 struct intel_crtc_state *other_crtc_state; 7185 7186 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", 7187 pipe_name(pipe), pipe_config->fdi_lanes); 7188 if (pipe_config->fdi_lanes > 4) { 7189 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", 7190 pipe_name(pipe), pipe_config->fdi_lanes); 7191 return -EINVAL; 7192 } 7193 7194 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 7195 if (pipe_config->fdi_lanes > 2) { 7196 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 7197 pipe_config->fdi_lanes); 7198 return -EINVAL; 7199 } else { 7200 return 0; 7201 } 7202 } 7203 7204 if (INTEL_INFO(dev_priv)->num_pipes == 2) 7205 return 0; 7206 7207 /* Ivybridge 3 pipe is really complicated */ 7208 switch (pipe) { 7209 case PIPE_A: 7210 return 0; 7211 case PIPE_B: 7212 if (pipe_config->fdi_lanes <= 2) 7213 return 0; 7214 7215 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); 7216 other_crtc_state = 7217 intel_atomic_get_crtc_state(state, other_crtc); 7218 if (IS_ERR(other_crtc_state)) 7219 return PTR_ERR(other_crtc_state); 7220 7221 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 7222 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", 7223 pipe_name(pipe), pipe_config->fdi_lanes); 7224 return -EINVAL; 7225 } 7226 return 0; 7227 case PIPE_C: 7228 if (pipe_config->fdi_lanes > 2) { 7229 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 7230 pipe_name(pipe), pipe_config->fdi_lanes); 7231 return -EINVAL; 7232 } 7233 7234 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); 7235 other_crtc_state = 7236 intel_atomic_get_crtc_state(state, other_crtc); 7237 if (IS_ERR(other_crtc_state)) 7238 return PTR_ERR(other_crtc_state); 7239 7240 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 7241 
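/* Pipes B and C share the FDI lanes on IVB: if pipe B already needs more than two lanes, none are left over for pipe C. */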
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 7242 return -EINVAL; 7243 } 7244 return 0; 7245 default: 7246 BUG(); 7247 } 7248 } 7249 7250 #define RETRY 1 7251 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, 7252 struct intel_crtc_state *pipe_config) 7253 { 7254 struct drm_device *dev = intel_crtc->base.dev; 7255 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 7256 int lane, link_bw, fdi_dotclock, ret; 7257 bool needs_recompute = false; 7258 7259 retry: 7260 /* FDI is a binary signal running at ~2.7GHz, encoding 7261 * each output octet as 10 bits. The actual frequency 7262 * is stored as a divider into a 100MHz clock, and the 7263 * mode pixel clock is stored in units of 1KHz. 7264 * Hence the bw of each lane in terms of the mode signal 7265 * is: 7266 */ 7267 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config); 7268 7269 fdi_dotclock = adjusted_mode->crtc_clock; 7270 7271 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 7272 pipe_config->pipe_bpp); 7273 7274 pipe_config->fdi_lanes = lane; 7275 7276 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 7277 link_bw, &pipe_config->fdi_m_n, false, false); 7278 7279 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 7280 if (ret == -EDEADLK) 7281 return ret; 7282 7283 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 7284 pipe_config->pipe_bpp -= 2*3; 7285 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 7286 pipe_config->pipe_bpp); 7287 needs_recompute = true; 7288 pipe_config->bw_constrained = true; 7289 7290 goto retry; 7291 } 7292 7293 if (needs_recompute) 7294 return RETRY; 7295 7296 return ret; 7297 } 7298 7299 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) 7300 { 7301 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7302 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7303 7304 /* IPS only exists on ULT machines and is tied to pipe A. */ 7305 if (!hsw_crtc_supports_ips(crtc)) 7306 return false; 7307 7308 if (!i915_modparams.enable_ips) 7309 return false; 7310 7311 if (crtc_state->pipe_bpp > 24) 7312 return false; 7313 7314 /* 7315 * We compare against max which means we must take 7316 * the increased cdclk requirement into account when 7317 * calculating the new cdclk. 7318 * 7319 * Should measure whether using a lower cdclk w/o IPS 7320 */ 7321 if (IS_BROADWELL(dev_priv) && 7322 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100) 7323 return false; 7324 7325 return true; 7326 } 7327 7328 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state) 7329 { 7330 struct drm_i915_private *dev_priv = 7331 to_i915(crtc_state->base.crtc->dev); 7332 struct intel_atomic_state *intel_state = 7333 to_intel_atomic_state(crtc_state->base.state); 7334 7335 if (!hsw_crtc_state_ips_capable(crtc_state)) 7336 return false; 7337 7338 /* 7339 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 7340 * enabled and disabled dynamically based on package C states, 7341 * user space can't make reliable use of the CRCs, so let's just 7342 * completely disable it. 7343 */ 7344 if (crtc_state->crc_enabled) 7345 return false; 7346 7347 /* IPS should be fine as long as at least one plane is enabled. 
*/ 7348 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) 7349 return false; 7350 7351 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 7352 if (IS_BROADWELL(dev_priv) && 7353 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100) 7354 return false; 7355 7356 return true; 7357 } 7358 7359 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 7360 { 7361 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7362 7363 /* GDG double wide on either pipe, otherwise pipe A only */ 7364 return INTEL_GEN(dev_priv) < 4 && 7365 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 7366 } 7367 7368 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) 7369 { 7370 u32 pixel_rate; 7371 7372 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock; 7373 7374 /* 7375 * We only use IF-ID interlacing. If we ever use 7376 * PF-ID we'll need to adjust the pixel_rate here. 7377 */ 7378 7379 if (pipe_config->pch_pfit.enabled) { 7380 u64 pipe_w, pipe_h, pfit_w, pfit_h; 7381 u32 pfit_size = pipe_config->pch_pfit.size; 7382 7383 pipe_w = pipe_config->pipe_src_w; 7384 pipe_h = pipe_config->pipe_src_h; 7385 7386 pfit_w = (pfit_size >> 16) & 0xFFFF; 7387 pfit_h = pfit_size & 0xFFFF; 7388 if (pipe_w < pfit_w) 7389 pipe_w = pfit_w; 7390 if (pipe_h < pfit_h) 7391 pipe_h = pfit_h; 7392 7393 if (WARN_ON(!pfit_w || !pfit_h)) 7394 return pixel_rate; 7395 7396 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h), 7397 pfit_w * pfit_h); 7398 } 7399 7400 return pixel_rate; 7401 } 7402 7403 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 7404 { 7405 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 7406 7407 if (HAS_GMCH(dev_priv)) 7408 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 7409 crtc_state->pixel_rate = 7410 crtc_state->base.adjusted_mode.crtc_clock; 7411 else 7412 crtc_state->pixel_rate = 7413 ilk_pipe_pixel_rate(crtc_state); 7414 } 7415 7416 static int intel_crtc_compute_config(struct intel_crtc *crtc, 7417 struct intel_crtc_state *pipe_config) 7418 { 7419 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7420 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 7421 int clock_limit = dev_priv->max_dotclk_freq; 7422 7423 if (INTEL_GEN(dev_priv) < 4) { 7424 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 7425 7426 /* 7427 * Enable double wide mode when the dot clock 7428 * is > 90% of the (display) core speed. 7429 */ 7430 if (intel_crtc_supports_double_wide(crtc) && 7431 adjusted_mode->crtc_clock > clock_limit) { 7432 clock_limit = dev_priv->max_dotclk_freq; 7433 pipe_config->double_wide = true; 7434 } 7435 } 7436 7437 if (adjusted_mode->crtc_clock > clock_limit) { 7438 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 7439 adjusted_mode->crtc_clock, clock_limit, 7440 yesno(pipe_config->double_wide)); 7441 return -EINVAL; 7442 } 7443 7444 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 7445 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) && 7446 pipe_config->base.ctm) { 7447 /* 7448 * There is only one pipe CSC unit per pipe, and we need that 7449 * for output conversion from RGB->YCBCR. So if CTM is already 7450 * applied we can't support YCBCR420 output. 
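 * (Put differently: the one CSC unit can apply either the user CTM or the
 * RGB->YCBCR output conversion, never both at once, hence the rejection
 * below.)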
7451 */ 7452 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n"); 7453 return -EINVAL; 7454 } 7455 7456 /* 7457 * Pipe horizontal size must be even in: 7458 * - DVO ganged mode 7459 * - LVDS dual channel mode 7460 * - Double wide pipe 7461 */ 7462 if (pipe_config->pipe_src_w & 1) { 7463 if (pipe_config->double_wide) { 7464 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n"); 7465 return -EINVAL; 7466 } 7467 7468 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 7469 intel_is_dual_link_lvds(dev_priv)) { 7470 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n"); 7471 return -EINVAL; 7472 } 7473 } 7474 7475 /* Cantiga+ cannot handle modes with an hsync front porch of 0. 7476 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 7477 */ 7478 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && 7479 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 7480 return -EINVAL; 7481 7482 intel_crtc_compute_pixel_rate(pipe_config); 7483 7484 if (pipe_config->has_pch_encoder) 7485 return ironlake_fdi_compute_config(crtc, pipe_config); 7486 7487 return 0; 7488 } 7489 7490 static void 7491 intel_reduce_m_n_ratio(u32 *num, u32 *den) 7492 { 7493 while (*num > DATA_LINK_M_N_MASK || 7494 *den > DATA_LINK_M_N_MASK) { 7495 *num >>= 1; 7496 *den >>= 1; 7497 } 7498 } 7499 7500 static void compute_m_n(unsigned int m, unsigned int n, 7501 u32 *ret_m, u32 *ret_n, 7502 bool constant_n) 7503 { 7504 /* 7505 * Several DP dongles in particular seem to be fussy about 7506 * too large link M/N values. Use an N value of 0x8000, which 7507 * such devices should accept. 0x8000 is the fixed N value 7508 * specified for asynchronous clock mode, and the devices 7509 * expect it in synchronous clock mode as well.
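 * As an illustrative example (numbers picked here, not from Bspec): a
 * 148500 kHz pixel clock at 24 bpp over 4 lanes of a 270000 kHz link gives
 * data M/N = 3564000/8640000; with constant_n the N register value is
 * 0x8000 (32768), so M becomes 3564000 * 32768 / 8640000 = 13516. The
 * link M/N ratio 148500/270000 likewise scales to 18022/32768.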
7510 */ 7511 if (constant_n) 7512 *ret_n = 0x8000; 7513 else 7514 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 7515 7516 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 7517 intel_reduce_m_n_ratio(ret_m, ret_n); 7518 } 7519 7520 void 7521 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 7522 int pixel_clock, int link_clock, 7523 struct intel_link_m_n *m_n, 7524 bool constant_n, bool fec_enable) 7525 { 7526 u32 data_clock = bits_per_pixel * pixel_clock; 7527 7528 if (fec_enable) 7529 data_clock = intel_dp_mode_to_fec_clock(data_clock); 7530 7531 m_n->tu = 64; 7532 compute_m_n(data_clock, 7533 link_clock * nlanes * 8, 7534 &m_n->gmch_m, &m_n->gmch_n, 7535 constant_n); 7536 7537 compute_m_n(pixel_clock, link_clock, 7538 &m_n->link_m, &m_n->link_n, 7539 constant_n); 7540 } 7541 7542 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 7543 { 7544 if (i915_modparams.panel_use_ssc >= 0) 7545 return i915_modparams.panel_use_ssc != 0; 7546 return dev_priv->vbt.lvds_use_ssc 7547 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 7548 } 7549 7550 static u32 pnv_dpll_compute_fp(struct dpll *dpll) 7551 { 7552 return (1 << dpll->n) << 16 | dpll->m2; 7553 } 7554 7555 static u32 i9xx_dpll_compute_fp(struct dpll *dpll) 7556 { 7557 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 7558 } 7559 7560 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7561 struct intel_crtc_state *crtc_state, 7562 struct dpll *reduced_clock) 7563 { 7564 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7565 u32 fp, fp2 = 0; 7566 7567 if (IS_PINEVIEW(dev_priv)) { 7568 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 7569 if (reduced_clock) 7570 fp2 = pnv_dpll_compute_fp(reduced_clock); 7571 } else { 7572 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 7573 if (reduced_clock) 7574 fp2 = i9xx_dpll_compute_fp(reduced_clock); 7575 } 7576 7577 crtc_state->dpll_hw_state.fp0 = fp; 7578 7579 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 7580 reduced_clock) { 7581 crtc_state->dpll_hw_state.fp1 = fp2; 7582 } else { 7583 crtc_state->dpll_hw_state.fp1 = fp; 7584 } 7585 } 7586 7587 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe 7588 pipe) 7589 { 7590 u32 reg_val; 7591 7592 /* 7593 * PLLB opamp always calibrates to max value of 0x3f, force enable it 7594 * and set it to a reasonable value instead. 
7595 */ 7596 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7597 reg_val &= 0xffffff00; 7598 reg_val |= 0x00000030; 7599 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7600 7601 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7602 reg_val &= 0x00ffffff; 7603 reg_val |= 0x8c000000; 7604 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7605 7606 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7607 reg_val &= 0xffffff00; 7608 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7609 7610 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7611 reg_val &= 0x00ffffff; 7612 reg_val |= 0xb0000000; 7613 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7614 } 7615 7616 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 7617 const struct intel_link_m_n *m_n) 7618 { 7619 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7620 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7621 enum pipe pipe = crtc->pipe; 7622 7623 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7624 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 7625 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 7626 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 7627 } 7628 7629 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 7630 enum transcoder transcoder) 7631 { 7632 if (IS_HASWELL(dev_priv)) 7633 return transcoder == TRANSCODER_EDP; 7634 7635 /* 7636 * Strictly speaking some registers are available before 7637 * gen7, but we only support DRRS on gen7+ 7638 */ 7639 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv); 7640 } 7641 7642 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 7643 const struct intel_link_m_n *m_n, 7644 const struct intel_link_m_n *m2_n2) 7645 { 7646 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7647 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7648 enum pipe pipe = crtc->pipe; 7649 enum transcoder transcoder = crtc_state->cpu_transcoder; 7650 7651 if (INTEL_GEN(dev_priv) >= 5) { 7652 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 7653 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 7654 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 7655 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 7656 /* 7657 * M2_N2 registers are set only if DRRS is supported 7658 * (to make sure the registers are not unnecessarily accessed). 7659 */ 7660 if (m2_n2 && crtc_state->has_drrs && 7661 transcoder_has_m2_n2(dev_priv, transcoder)) { 7662 I915_WRITE(PIPE_DATA_M2(transcoder), 7663 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 7664 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 7665 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 7666 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 7667 } 7668 } else { 7669 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7670 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 7671 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 7672 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 7673 } 7674 } 7675 7676 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) 7677 { 7678 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 7679 7680 if (m_n == M1_N1) { 7681 dp_m_n = &crtc_state->dp_m_n; 7682 dp_m2_n2 = &crtc_state->dp_m2_n2; 7683 } else if (m_n == M2_N2) { 7684 7685 /* 7686 * M2_N2 registers are not supported. Hence m2_n2 divider value 7687 * needs to be programmed into M1_N1. 
7688 */ 7689 dp_m_n = &crtc_state->dp_m2_n2; 7690 } else { 7691 DRM_ERROR("Unsupported divider value\n"); 7692 return; 7693 } 7694 7695 if (crtc_state->has_pch_encoder) 7696 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n); 7697 else 7698 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); 7699 } 7700 7701 static void vlv_compute_dpll(struct intel_crtc *crtc, 7702 struct intel_crtc_state *pipe_config) 7703 { 7704 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 7705 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7706 if (crtc->pipe != PIPE_A) 7707 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7708 7709 /* DPLL not used with DSI, but still need the rest set up */ 7710 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7711 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 7712 DPLL_EXT_BUFFER_ENABLE_VLV; 7713 7714 pipe_config->dpll_hw_state.dpll_md = 7715 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7716 } 7717 7718 static void chv_compute_dpll(struct intel_crtc *crtc, 7719 struct intel_crtc_state *pipe_config) 7720 { 7721 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 7722 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7723 if (crtc->pipe != PIPE_A) 7724 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7725 7726 /* DPLL not used with DSI, but still need the rest set up */ 7727 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7728 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 7729 7730 pipe_config->dpll_hw_state.dpll_md = 7731 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7732 } 7733 7734 static void vlv_prepare_pll(struct intel_crtc *crtc, 7735 const struct intel_crtc_state *pipe_config) 7736 { 7737 struct drm_device *dev = crtc->base.dev; 7738 struct drm_i915_private *dev_priv = to_i915(dev); 7739 enum pipe pipe = crtc->pipe; 7740 u32 mdiv; 7741 u32 bestn, bestm1, bestm2, bestp1, bestp2; 7742 u32 coreclk, reg_val; 7743 7744 /* Enable Refclk */ 7745 I915_WRITE(DPLL(pipe), 7746 pipe_config->dpll_hw_state.dpll & 7747 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 7748 7749 /* No need to actually set up the DPLL with DSI */ 7750 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7751 return; 7752 7753 vlv_dpio_get(dev_priv); 7754 7755 bestn = pipe_config->dpll.n; 7756 bestm1 = pipe_config->dpll.m1; 7757 bestm2 = pipe_config->dpll.m2; 7758 bestp1 = pipe_config->dpll.p1; 7759 bestp2 = pipe_config->dpll.p2; 7760 7761 /* See eDP HDMI DPIO driver vbios notes doc */ 7762 7763 /* PLL B needs special handling */ 7764 if (pipe == PIPE_B) 7765 vlv_pllb_recal_opamp(dev_priv, pipe); 7766 7767 /* Set up Tx target for periodic Rcomp update */ 7768 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 7769 7770 /* Disable target IRef on PLL */ 7771 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 7772 reg_val &= 0x00ffffff; 7773 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 7774 7775 /* Disable fast lock */ 7776 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 7777 7778 /* Set idtafcrecal before PLL is enabled */ 7779 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 7780 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 7781 mdiv |= ((bestn << DPIO_N_SHIFT)); 7782 mdiv |= (1 << DPIO_K_SHIFT); 7783 7784 /* 7785 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 7786 * but we don't support that). 7787 * Note: don't use the DAC post divider as it seems unstable. 
7788 */ 7789 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 7790 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7791 7792 mdiv |= DPIO_ENABLE_CALIBRATION; 7793 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7794 7795 /* Set HBR and RBR LPF coefficients */ 7796 if (pipe_config->port_clock == 162000 || 7797 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || 7798 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) 7799 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7800 0x009f0003); 7801 else 7802 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7803 0x00d0000f); 7804 7805 if (intel_crtc_has_dp_encoder(pipe_config)) { 7806 /* Use SSC source */ 7807 if (pipe == PIPE_A) 7808 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7809 0x0df40000); 7810 else 7811 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7812 0x0df70000); 7813 } else { /* HDMI or VGA */ 7814 /* Use bend source */ 7815 if (pipe == PIPE_A) 7816 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7817 0x0df70000); 7818 else 7819 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7820 0x0df40000); 7821 } 7822 7823 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 7824 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 7825 if (intel_crtc_has_dp_encoder(pipe_config)) 7826 coreclk |= 0x01000000; 7827 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 7828 7829 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 7830 7831 vlv_dpio_put(dev_priv); 7832 } 7833 7834 static void chv_prepare_pll(struct intel_crtc *crtc, 7835 const struct intel_crtc_state *pipe_config) 7836 { 7837 struct drm_device *dev = crtc->base.dev; 7838 struct drm_i915_private *dev_priv = to_i915(dev); 7839 enum pipe pipe = crtc->pipe; 7840 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7841 u32 loopfilter, tribuf_calcntr; 7842 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 7843 u32 dpio_val; 7844 int vco; 7845 7846 /* Enable Refclk and SSC */ 7847 I915_WRITE(DPLL(pipe), 7848 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 7849 7850 /* No need to actually set up the DPLL with DSI */ 7851 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7852 return; 7853 7854 bestn = pipe_config->dpll.n; 7855 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 7856 bestm1 = pipe_config->dpll.m1; 7857 bestm2 = pipe_config->dpll.m2 >> 22; 7858 bestp1 = pipe_config->dpll.p1; 7859 bestp2 = pipe_config->dpll.p2; 7860 vco = pipe_config->dpll.vco; 7861 dpio_val = 0; 7862 loopfilter = 0; 7863 7864 vlv_dpio_get(dev_priv); 7865 7866 /* p1 and p2 divider */ 7867 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 7868 5 << DPIO_CHV_S1_DIV_SHIFT | 7869 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 7870 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 7871 1 << DPIO_CHV_K_DIV_SHIFT); 7872 7873 /* Feedback post-divider - m2 */ 7874 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 7875 7876 /* Feedback refclk divider - n and m1 */ 7877 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 7878 DPIO_CHV_M1_DIV_BY_2 | 7879 1 << DPIO_CHV_N_DIV_SHIFT); 7880 7881 /* M2 fraction division */ 7882 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 7883 7884 /* M2 fraction division enable */ 7885 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 7886 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 7887 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 7888 if (bestm2_frac) 7889 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 7890 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 7891 7892 /* Program 
digital lock detect threshold */ 7893 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 7894 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 7895 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 7896 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 7897 if (!bestm2_frac) 7898 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 7899 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 7900 7901 /* Loop filter */ 7902 if (vco == 5400000) { 7903 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 7904 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 7905 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 7906 tribuf_calcntr = 0x9; 7907 } else if (vco <= 6200000) { 7908 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 7909 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 7910 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7911 tribuf_calcntr = 0x9; 7912 } else if (vco <= 6480000) { 7913 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7914 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7915 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7916 tribuf_calcntr = 0x8; 7917 } else { 7918 /* Not supported. Apply the same limits as in the max case */ 7919 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7920 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7921 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7922 tribuf_calcntr = 0; 7923 } 7924 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 7925 7926 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 7927 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 7928 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 7929 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 7930 7931 /* AFC Recal */ 7932 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 7933 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 7934 DPIO_AFC_RECAL); 7935 7936 vlv_dpio_put(dev_priv); 7937 } 7938 7939 /** 7940 * vlv_force_pll_on - forcibly enable just the PLL 7941 * @dev_priv: i915 private structure 7942 * @pipe: pipe PLL to enable 7943 * @dpll: PLL configuration 7944 * 7945 * Enable the PLL for @pipe using the supplied @dpll config. To be used 7946 * in cases where we need the PLL enabled even when @pipe is not going to 7947 * be enabled. 7948 */ 7949 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, 7950 const struct dpll *dpll) 7951 { 7952 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 7953 struct intel_crtc_state *pipe_config; 7954 7955 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); 7956 if (!pipe_config) 7957 return -ENOMEM; 7958 7959 pipe_config->base.crtc = &crtc->base; 7960 pipe_config->pixel_multiplier = 1; 7961 pipe_config->dpll = *dpll; 7962 7963 if (IS_CHERRYVIEW(dev_priv)) { 7964 chv_compute_dpll(crtc, pipe_config); 7965 chv_prepare_pll(crtc, pipe_config); 7966 chv_enable_pll(crtc, pipe_config); 7967 } else { 7968 vlv_compute_dpll(crtc, pipe_config); 7969 vlv_prepare_pll(crtc, pipe_config); 7970 vlv_enable_pll(crtc, pipe_config); 7971 } 7972 7973 kfree(pipe_config); 7974 7975 return 0; 7976 } 7977 7978 /** 7979 * vlv_force_pll_off - forcibly disable just the PLL 7980 * @dev_priv: i915 private structure 7981 * @pipe: pipe PLL to disable 7982 * 7983 * Disable the PLL for @pipe. To be used to undo vlv_force_pll_on(), i.e. in 7984 * cases where the PLL was enabled even though @pipe was not going to be enabled.
7985 */ 7986 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) 7987 { 7988 if (IS_CHERRYVIEW(dev_priv)) 7989 chv_disable_pll(dev_priv, pipe); 7990 else 7991 vlv_disable_pll(dev_priv, pipe); 7992 } 7993 7994 static void i9xx_compute_dpll(struct intel_crtc *crtc, 7995 struct intel_crtc_state *crtc_state, 7996 struct dpll *reduced_clock) 7997 { 7998 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7999 u32 dpll; 8000 struct dpll *clock = &crtc_state->dpll; 8001 8002 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8003 8004 dpll = DPLL_VGA_MODE_DIS; 8005 8006 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 8007 dpll |= DPLLB_MODE_LVDS; 8008 else 8009 dpll |= DPLLB_MODE_DAC_SERIAL; 8010 8011 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 8012 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 8013 dpll |= (crtc_state->pixel_multiplier - 1) 8014 << SDVO_MULTIPLIER_SHIFT_HIRES; 8015 } 8016 8017 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 8018 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 8019 dpll |= DPLL_SDVO_HIGH_SPEED; 8020 8021 if (intel_crtc_has_dp_encoder(crtc_state)) 8022 dpll |= DPLL_SDVO_HIGH_SPEED; 8023 8024 /* compute bitmask from p1 value */ 8025 if (IS_PINEVIEW(dev_priv)) 8026 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 8027 else { 8028 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8029 if (IS_G4X(dev_priv) && reduced_clock) 8030 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8031 } 8032 switch (clock->p2) { 8033 case 5: 8034 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8035 break; 8036 case 7: 8037 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8038 break; 8039 case 10: 8040 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8041 break; 8042 case 14: 8043 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8044 break; 8045 } 8046 if (INTEL_GEN(dev_priv) >= 4) 8047 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 8048 8049 if (crtc_state->sdvo_tv_clock) 8050 dpll |= PLL_REF_INPUT_TVCLKINBC; 8051 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8052 intel_panel_use_ssc(dev_priv)) 8053 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8054 else 8055 dpll |= PLL_REF_INPUT_DREFCLK; 8056 8057 dpll |= DPLL_VCO_ENABLE; 8058 crtc_state->dpll_hw_state.dpll = dpll; 8059 8060 if (INTEL_GEN(dev_priv) >= 4) { 8061 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 8062 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8063 crtc_state->dpll_hw_state.dpll_md = dpll_md; 8064 } 8065 } 8066 8067 static void i8xx_compute_dpll(struct intel_crtc *crtc, 8068 struct intel_crtc_state *crtc_state, 8069 struct dpll *reduced_clock) 8070 { 8071 struct drm_device *dev = crtc->base.dev; 8072 struct drm_i915_private *dev_priv = to_i915(dev); 8073 u32 dpll; 8074 struct dpll *clock = &crtc_state->dpll; 8075 8076 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8077 8078 dpll = DPLL_VGA_MODE_DIS; 8079 8080 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8081 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8082 } else { 8083 if (clock->p1 == 2) 8084 dpll |= PLL_P1_DIVIDE_BY_TWO; 8085 else 8086 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8087 if (clock->p2 == 4) 8088 dpll |= PLL_P2_DIVIDE_BY_4; 8089 } 8090 8091 /* 8092 * Bspec: 8093 * "[Almador Errata}: For the correct operation of the muxed DVO pins 8094 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data, 8095 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock 8096 * Enable) must be set to “1” in both the DPLL A Control 
Register 8097 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)." 8098 * 8099 * For simplicity we simply keep both bits always enabled in 8100 * both DPLLs. The spec says we should disable the DVO 2X clock 8101 * when not needed, but this seems to work fine in practice. 8102 */ 8103 if (IS_I830(dev_priv) || 8104 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) 8105 dpll |= DPLL_DVO_2X_MODE; 8106 8107 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8108 intel_panel_use_ssc(dev_priv)) 8109 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8110 else 8111 dpll |= PLL_REF_INPUT_DREFCLK; 8112 8113 dpll |= DPLL_VCO_ENABLE; 8114 crtc_state->dpll_hw_state.dpll = dpll; 8115 } 8116 8117 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) 8118 { 8119 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8120 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8121 enum pipe pipe = crtc->pipe; 8122 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8123 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; 8124 u32 crtc_vtotal, crtc_vblank_end; 8125 int vsyncshift = 0; 8126 8127 /* We need to be careful not to change the adjusted mode, for otherwise 8128 * the hw state checker will get angry at the mismatch. */ 8129 crtc_vtotal = adjusted_mode->crtc_vtotal; 8130 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 8131 8132 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 8133 /* the chip adds 2 halflines automatically */ 8134 crtc_vtotal -= 1; 8135 crtc_vblank_end -= 1; 8136 8137 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8138 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 8139 else 8140 vsyncshift = adjusted_mode->crtc_hsync_start - 8141 adjusted_mode->crtc_htotal / 2; 8142 if (vsyncshift < 0) 8143 vsyncshift += adjusted_mode->crtc_htotal; 8144 } 8145 8146 if (INTEL_GEN(dev_priv) > 3) 8147 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 8148 8149 I915_WRITE(HTOTAL(cpu_transcoder), 8150 (adjusted_mode->crtc_hdisplay - 1) | 8151 ((adjusted_mode->crtc_htotal - 1) << 16)); 8152 I915_WRITE(HBLANK(cpu_transcoder), 8153 (adjusted_mode->crtc_hblank_start - 1) | 8154 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 8155 I915_WRITE(HSYNC(cpu_transcoder), 8156 (adjusted_mode->crtc_hsync_start - 1) | 8157 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 8158 8159 I915_WRITE(VTOTAL(cpu_transcoder), 8160 (adjusted_mode->crtc_vdisplay - 1) | 8161 ((crtc_vtotal - 1) << 16)); 8162 I915_WRITE(VBLANK(cpu_transcoder), 8163 (adjusted_mode->crtc_vblank_start - 1) | 8164 ((crtc_vblank_end - 1) << 16)); 8165 I915_WRITE(VSYNC(cpu_transcoder), 8166 (adjusted_mode->crtc_vsync_start - 1) | 8167 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 8168 8169 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 8170 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 8171 * documented on the DDI_FUNC_CTL register description, EDP Input Select 8172 * bits.
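 * As a concrete case: with EDP input select set to B on Haswell, the extra
 * write below copies the value just written to VTOTAL(TRANSCODER_EDP) into
 * VTOTAL(PIPE_B) so the two registers stay consistent.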
*/ 8173 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 8174 (pipe == PIPE_B || pipe == PIPE_C)) 8175 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 8176 8177 } 8178 8179 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 8180 { 8181 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8182 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8183 enum pipe pipe = crtc->pipe; 8184 8185 /* pipesrc controls the size that is scaled from, which should 8186 * always be the user's requested size. 8187 */ 8188 I915_WRITE(PIPESRC(pipe), 8189 ((crtc_state->pipe_src_w - 1) << 16) | 8190 (crtc_state->pipe_src_h - 1)); 8191 } 8192 8193 static void intel_get_pipe_timings(struct intel_crtc *crtc, 8194 struct intel_crtc_state *pipe_config) 8195 { 8196 struct drm_device *dev = crtc->base.dev; 8197 struct drm_i915_private *dev_priv = to_i915(dev); 8198 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 8199 u32 tmp; 8200 8201 tmp = I915_READ(HTOTAL(cpu_transcoder)); 8202 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 8203 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 8204 8205 if (!transcoder_is_dsi(cpu_transcoder)) { 8206 tmp = I915_READ(HBLANK(cpu_transcoder)); 8207 pipe_config->base.adjusted_mode.crtc_hblank_start = 8208 (tmp & 0xffff) + 1; 8209 pipe_config->base.adjusted_mode.crtc_hblank_end = 8210 ((tmp >> 16) & 0xffff) + 1; 8211 } 8212 tmp = I915_READ(HSYNC(cpu_transcoder)); 8213 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 8214 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 8215 8216 tmp = I915_READ(VTOTAL(cpu_transcoder)); 8217 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 8218 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 8219 8220 if (!transcoder_is_dsi(cpu_transcoder)) { 8221 tmp = I915_READ(VBLANK(cpu_transcoder)); 8222 pipe_config->base.adjusted_mode.crtc_vblank_start = 8223 (tmp & 0xffff) + 1; 8224 pipe_config->base.adjusted_mode.crtc_vblank_end = 8225 ((tmp >> 16) & 0xffff) + 1; 8226 } 8227 tmp = I915_READ(VSYNC(cpu_transcoder)); 8228 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 8229 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 8230 8231 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { 8232 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 8233 pipe_config->base.adjusted_mode.crtc_vtotal += 1; 8234 pipe_config->base.adjusted_mode.crtc_vblank_end += 1; 8235 } 8236 } 8237 8238 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 8239 struct intel_crtc_state *pipe_config) 8240 { 8241 struct drm_device *dev = crtc->base.dev; 8242 struct drm_i915_private *dev_priv = to_i915(dev); 8243 u32 tmp; 8244 8245 tmp = I915_READ(PIPESRC(crtc->pipe)); 8246 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 8247 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 8248 8249 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; 8250 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; 8251 } 8252 8253 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 8254 struct intel_crtc_state *pipe_config) 8255 { 8256 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; 8257 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; 8258 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; 8259 mode->hsync_end = 
pipe_config->base.adjusted_mode.crtc_hsync_end; 8260 8261 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; 8262 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; 8263 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; 8264 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; 8265 8266 mode->flags = pipe_config->base.adjusted_mode.flags; 8267 mode->type = DRM_MODE_TYPE_DRIVER; 8268 8269 mode->clock = pipe_config->base.adjusted_mode.crtc_clock; 8270 8271 mode->hsync = drm_mode_hsync(mode); 8272 mode->vrefresh = drm_mode_vrefresh(mode); 8273 drm_mode_set_name(mode); 8274 } 8275 8276 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 8277 { 8278 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8279 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8280 u32 pipeconf; 8281 8282 pipeconf = 0; 8283 8284 /* we keep both pipes enabled on 830 */ 8285 if (IS_I830(dev_priv)) 8286 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; 8287 8288 if (crtc_state->double_wide) 8289 pipeconf |= PIPECONF_DOUBLE_WIDE; 8290 8291 /* only g4x and later have fancy bpc/dither controls */ 8292 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8293 IS_CHERRYVIEW(dev_priv)) { 8294 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 8295 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 8296 pipeconf |= PIPECONF_DITHER_EN | 8297 PIPECONF_DITHER_TYPE_SP; 8298 8299 switch (crtc_state->pipe_bpp) { 8300 case 18: 8301 pipeconf |= PIPECONF_6BPC; 8302 break; 8303 case 24: 8304 pipeconf |= PIPECONF_8BPC; 8305 break; 8306 case 30: 8307 pipeconf |= PIPECONF_10BPC; 8308 break; 8309 default: 8310 /* Case prevented by intel_choose_pipe_bpp_dither. */ 8311 BUG(); 8312 } 8313 } 8314 8315 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 8316 if (INTEL_GEN(dev_priv) < 4 || 8317 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8318 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 8319 else 8320 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 8321 } else { 8322 pipeconf |= PIPECONF_PROGRESSIVE; 8323 } 8324 8325 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 8326 crtc_state->limited_color_range) 8327 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 8328 8329 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 8330 8331 I915_WRITE(PIPECONF(crtc->pipe), pipeconf); 8332 POSTING_READ(PIPECONF(crtc->pipe)); 8333 } 8334 8335 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 8336 struct intel_crtc_state *crtc_state) 8337 { 8338 struct drm_device *dev = crtc->base.dev; 8339 struct drm_i915_private *dev_priv = to_i915(dev); 8340 const struct intel_limit *limit; 8341 int refclk = 48000; 8342 8343 memset(&crtc_state->dpll_hw_state, 0, 8344 sizeof(crtc_state->dpll_hw_state)); 8345 8346 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8347 if (intel_panel_use_ssc(dev_priv)) { 8348 refclk = dev_priv->vbt.lvds_ssc_freq; 8349 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8350 } 8351 8352 limit = &intel_limits_i8xx_lvds; 8353 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 8354 limit = &intel_limits_i8xx_dvo; 8355 } else { 8356 limit = &intel_limits_i8xx_dac; 8357 } 8358 8359 if (!crtc_state->clock_set && 8360 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8361 refclk, NULL, &crtc_state->dpll)) { 8362 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8363 return -EINVAL; 8364 } 8365 8366 i8xx_compute_dpll(crtc, crtc_state, 
NULL); 8367 8368 return 0; 8369 } 8370 8371 static int g4x_crtc_compute_clock(struct intel_crtc *crtc, 8372 struct intel_crtc_state *crtc_state) 8373 { 8374 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8375 const struct intel_limit *limit; 8376 int refclk = 96000; 8377 8378 memset(&crtc_state->dpll_hw_state, 0, 8379 sizeof(crtc_state->dpll_hw_state)); 8380 8381 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8382 if (intel_panel_use_ssc(dev_priv)) { 8383 refclk = dev_priv->vbt.lvds_ssc_freq; 8384 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8385 } 8386 8387 if (intel_is_dual_link_lvds(dev_priv)) 8388 limit = &intel_limits_g4x_dual_channel_lvds; 8389 else 8390 limit = &intel_limits_g4x_single_channel_lvds; 8391 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 8392 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 8393 limit = &intel_limits_g4x_hdmi; 8394 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 8395 limit = &intel_limits_g4x_sdvo; 8396 } else { 8397 /* The option is for other outputs */ 8398 limit = &intel_limits_i9xx_sdvo; 8399 } 8400 8401 if (!crtc_state->clock_set && 8402 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8403 refclk, NULL, &crtc_state->dpll)) { 8404 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8405 return -EINVAL; 8406 } 8407 8408 i9xx_compute_dpll(crtc, crtc_state, NULL); 8409 8410 return 0; 8411 } 8412 8413 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 8414 struct intel_crtc_state *crtc_state) 8415 { 8416 struct drm_device *dev = crtc->base.dev; 8417 struct drm_i915_private *dev_priv = to_i915(dev); 8418 const struct intel_limit *limit; 8419 int refclk = 96000; 8420 8421 memset(&crtc_state->dpll_hw_state, 0, 8422 sizeof(crtc_state->dpll_hw_state)); 8423 8424 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8425 if (intel_panel_use_ssc(dev_priv)) { 8426 refclk = dev_priv->vbt.lvds_ssc_freq; 8427 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8428 } 8429 8430 limit = &intel_limits_pineview_lvds; 8431 } else { 8432 limit = &intel_limits_pineview_sdvo; 8433 } 8434 8435 if (!crtc_state->clock_set && 8436 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8437 refclk, NULL, &crtc_state->dpll)) { 8438 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8439 return -EINVAL; 8440 } 8441 8442 i9xx_compute_dpll(crtc, crtc_state, NULL); 8443 8444 return 0; 8445 } 8446 8447 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 8448 struct intel_crtc_state *crtc_state) 8449 { 8450 struct drm_device *dev = crtc->base.dev; 8451 struct drm_i915_private *dev_priv = to_i915(dev); 8452 const struct intel_limit *limit; 8453 int refclk = 96000; 8454 8455 memset(&crtc_state->dpll_hw_state, 0, 8456 sizeof(crtc_state->dpll_hw_state)); 8457 8458 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8459 if (intel_panel_use_ssc(dev_priv)) { 8460 refclk = dev_priv->vbt.lvds_ssc_freq; 8461 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8462 } 8463 8464 limit = &intel_limits_i9xx_lvds; 8465 } else { 8466 limit = &intel_limits_i9xx_sdvo; 8467 } 8468 8469 if (!crtc_state->clock_set && 8470 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8471 refclk, NULL, &crtc_state->dpll)) { 8472 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8473 return -EINVAL; 8474 } 8475 8476 i9xx_compute_dpll(crtc, crtc_state, NULL); 8477 8478 return 0; 8479 } 8480 8481 static int chv_crtc_compute_clock(struct 
intel_crtc *crtc, 8482 struct intel_crtc_state *crtc_state) 8483 { 8484 int refclk = 100000; 8485 const struct intel_limit *limit = &intel_limits_chv; 8486 8487 memset(&crtc_state->dpll_hw_state, 0, 8488 sizeof(crtc_state->dpll_hw_state)); 8489 8490 if (!crtc_state->clock_set && 8491 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8492 refclk, NULL, &crtc_state->dpll)) { 8493 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8494 return -EINVAL; 8495 } 8496 8497 chv_compute_dpll(crtc, crtc_state); 8498 8499 return 0; 8500 } 8501 8502 static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 8503 struct intel_crtc_state *crtc_state) 8504 { 8505 int refclk = 100000; 8506 const struct intel_limit *limit = &intel_limits_vlv; 8507 8508 memset(&crtc_state->dpll_hw_state, 0, 8509 sizeof(crtc_state->dpll_hw_state)); 8510 8511 if (!crtc_state->clock_set && 8512 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8513 refclk, NULL, &crtc_state->dpll)) { 8514 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8515 return -EINVAL; 8516 } 8517 8518 vlv_compute_dpll(crtc, crtc_state); 8519 8520 return 0; 8521 } 8522 8523 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 8524 { 8525 if (IS_I830(dev_priv)) 8526 return false; 8527 8528 return INTEL_GEN(dev_priv) >= 4 || 8529 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 8530 } 8531 8532 static void i9xx_get_pfit_config(struct intel_crtc *crtc, 8533 struct intel_crtc_state *pipe_config) 8534 { 8535 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8536 u32 tmp; 8537 8538 if (!i9xx_has_pfit(dev_priv)) 8539 return; 8540 8541 tmp = I915_READ(PFIT_CONTROL); 8542 if (!(tmp & PFIT_ENABLE)) 8543 return; 8544 8545 /* Check whether the pfit is attached to our pipe. */ 8546 if (INTEL_GEN(dev_priv) < 4) { 8547 if (crtc->pipe != PIPE_B) 8548 return; 8549 } else { 8550 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 8551 return; 8552 } 8553 8554 pipe_config->gmch_pfit.control = tmp; 8555 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 8556 } 8557 8558 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 8559 struct intel_crtc_state *pipe_config) 8560 { 8561 struct drm_device *dev = crtc->base.dev; 8562 struct drm_i915_private *dev_priv = to_i915(dev); 8563 int pipe = pipe_config->cpu_transcoder; 8564 struct dpll clock; 8565 u32 mdiv; 8566 int refclk = 100000; 8567 8568 /* In case of DSI, DPLL will not be used */ 8569 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8570 return; 8571 8572 vlv_dpio_get(dev_priv); 8573 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 8574 vlv_dpio_put(dev_priv); 8575 8576 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 8577 clock.m2 = mdiv & DPIO_M2DIV_MASK; 8578 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 8579 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 8580 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 8581 8582 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 8583 } 8584 8585 static void 8586 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 8587 struct intel_initial_plane_config *plane_config) 8588 { 8589 struct drm_device *dev = crtc->base.dev; 8590 struct drm_i915_private *dev_priv = to_i915(dev); 8591 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 8592 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 8593 enum pipe pipe; 8594 u32 val, base, offset; 8595 int fourcc, pixel_format; 8596 unsigned int aligned_height; 8597 struct drm_framebuffer *fb; 8598 struct intel_framebuffer *intel_fb; 8599 8600 if 
(!plane->get_hw_state(plane, &pipe)) 8601 return; 8602 8603 WARN_ON(pipe != crtc->pipe); 8604 8605 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8606 if (!intel_fb) { 8607 DRM_DEBUG_KMS("failed to alloc fb\n"); 8608 return; 8609 } 8610 8611 fb = &intel_fb->base; 8612 8613 fb->dev = dev; 8614 8615 val = I915_READ(DSPCNTR(i9xx_plane)); 8616 8617 if (INTEL_GEN(dev_priv) >= 4) { 8618 if (val & DISPPLANE_TILED) { 8619 plane_config->tiling = I915_TILING_X; 8620 fb->modifier = I915_FORMAT_MOD_X_TILED; 8621 } 8622 8623 if (val & DISPPLANE_ROTATE_180) 8624 plane_config->rotation = DRM_MODE_ROTATE_180; 8625 } 8626 8627 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && 8628 val & DISPPLANE_MIRROR) 8629 plane_config->rotation |= DRM_MODE_REFLECT_X; 8630 8631 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 8632 fourcc = i9xx_format_to_fourcc(pixel_format); 8633 fb->format = drm_format_info(fourcc); 8634 8635 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 8636 offset = I915_READ(DSPOFFSET(i9xx_plane)); 8637 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 8638 } else if (INTEL_GEN(dev_priv) >= 4) { 8639 if (plane_config->tiling) 8640 offset = I915_READ(DSPTILEOFF(i9xx_plane)); 8641 else 8642 offset = I915_READ(DSPLINOFF(i9xx_plane)); 8643 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 8644 } else { 8645 base = I915_READ(DSPADDR(i9xx_plane)); 8646 } 8647 plane_config->base = base; 8648 8649 val = I915_READ(PIPESRC(pipe)); 8650 fb->width = ((val >> 16) & 0xfff) + 1; 8651 fb->height = ((val >> 0) & 0xfff) + 1; 8652 8653 val = I915_READ(DSPSTRIDE(i9xx_plane)); 8654 fb->pitches[0] = val & 0xffffffc0; 8655 8656 aligned_height = intel_fb_align_height(fb, 0, fb->height); 8657 8658 plane_config->size = fb->pitches[0] * aligned_height; 8659 8660 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 8661 crtc->base.name, plane->base.name, fb->width, fb->height, 8662 fb->format->cpp[0] * 8, base, fb->pitches[0], 8663 plane_config->size); 8664 8665 plane_config->fb = intel_fb; 8666 } 8667 8668 static void chv_crtc_clock_get(struct intel_crtc *crtc, 8669 struct intel_crtc_state *pipe_config) 8670 { 8671 struct drm_device *dev = crtc->base.dev; 8672 struct drm_i915_private *dev_priv = to_i915(dev); 8673 int pipe = pipe_config->cpu_transcoder; 8674 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8675 struct dpll clock; 8676 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 8677 int refclk = 100000; 8678 8679 /* In case of DSI, DPLL will not be used */ 8680 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8681 return; 8682 8683 vlv_dpio_get(dev_priv); 8684 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 8685 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 8686 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 8687 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 8688 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8689 vlv_dpio_put(dev_priv); 8690 8691 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 
2 : 0; 8692 clock.m2 = (pll_dw0 & 0xff) << 22; 8693 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 8694 clock.m2 |= pll_dw2 & 0x3fffff; 8695 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 8696 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 8697 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 8698 8699 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 8700 } 8701 8702 static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc, 8703 struct intel_crtc_state *pipe_config) 8704 { 8705 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8706 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB; 8707 8708 pipe_config->lspcon_downsampling = false; 8709 8710 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { 8711 u32 tmp = I915_READ(PIPEMISC(crtc->pipe)); 8712 8713 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { 8714 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE; 8715 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND; 8716 8717 if (ycbcr420_enabled) { 8718 /* We support 4:2:0 in full blend mode only */ 8719 if (!blend) 8720 output = INTEL_OUTPUT_FORMAT_INVALID; 8721 else if (!(IS_GEMINILAKE(dev_priv) || 8722 INTEL_GEN(dev_priv) >= 10)) 8723 output = INTEL_OUTPUT_FORMAT_INVALID; 8724 else 8725 output = INTEL_OUTPUT_FORMAT_YCBCR420; 8726 } else { 8727 /* 8728 * Currently there is no interface defined to 8729 * check user preference between RGB/YCBCR444 8730 * or YCBCR420. So the only possible case for 8731 * YCBCR444 usage is driving YCBCR420 output 8732 * with LSPCON, when pipe is configured for 8733 * YCBCR444 output and LSPCON takes care of 8734 * downsampling it. 8735 */ 8736 pipe_config->lspcon_downsampling = true; 8737 output = INTEL_OUTPUT_FORMAT_YCBCR444; 8738 } 8739 } 8740 } 8741 8742 pipe_config->output_format = output; 8743 } 8744 8745 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) 8746 { 8747 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8748 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 8749 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8750 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 8751 u32 tmp; 8752 8753 tmp = I915_READ(DSPCNTR(i9xx_plane)); 8754 8755 if (tmp & DISPPLANE_GAMMA_ENABLE) 8756 crtc_state->gamma_enable = true; 8757 8758 if (!HAS_GMCH(dev_priv) && 8759 tmp & DISPPLANE_PIPE_CSC_ENABLE) 8760 crtc_state->csc_enable = true; 8761 } 8762 8763 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 8764 struct intel_crtc_state *pipe_config) 8765 { 8766 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8767 enum intel_display_power_domain power_domain; 8768 intel_wakeref_t wakeref; 8769 u32 tmp; 8770 bool ret; 8771 8772 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 8773 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 8774 if (!wakeref) 8775 return false; 8776 8777 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 8778 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8779 pipe_config->shared_dpll = NULL; 8780 8781 ret = false; 8782 8783 tmp = I915_READ(PIPECONF(crtc->pipe)); 8784 if (!(tmp & PIPECONF_ENABLE)) 8785 goto out; 8786 8787 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8788 IS_CHERRYVIEW(dev_priv)) { 8789 switch (tmp & PIPECONF_BPC_MASK) { 8790 case PIPECONF_6BPC: 8791 pipe_config->pipe_bpp = 18; 8792 break; 8793 case PIPECONF_8BPC: 8794 pipe_config->pipe_bpp = 24; 8795 break; 8796 case PIPECONF_10BPC: 8797 pipe_config->pipe_bpp = 30; 8798 break; 8799 default: 8800 break; 8801 } 
8802 } 8803 8804 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 8805 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 8806 pipe_config->limited_color_range = true; 8807 8808 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> 8809 PIPECONF_GAMMA_MODE_SHIFT; 8810 8811 if (IS_CHERRYVIEW(dev_priv)) 8812 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe)); 8813 8814 i9xx_get_pipe_color_config(pipe_config); 8815 intel_color_get_config(pipe_config); 8816 8817 if (INTEL_GEN(dev_priv) < 4) 8818 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 8819 8820 intel_get_pipe_timings(crtc, pipe_config); 8821 intel_get_pipe_src_size(crtc, pipe_config); 8822 8823 i9xx_get_pfit_config(crtc, pipe_config); 8824 8825 if (INTEL_GEN(dev_priv) >= 4) { 8826 /* No way to read it out on pipes B and C */ 8827 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 8828 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 8829 else 8830 tmp = I915_READ(DPLL_MD(crtc->pipe)); 8831 pipe_config->pixel_multiplier = 8832 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 8833 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 8834 pipe_config->dpll_hw_state.dpll_md = tmp; 8835 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 8836 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 8837 tmp = I915_READ(DPLL(crtc->pipe)); 8838 pipe_config->pixel_multiplier = 8839 ((tmp & SDVO_MULTIPLIER_MASK) 8840 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 8841 } else { 8842 /* Note that on i915G/GM the pixel multiplier is in the sdvo 8843 * port and will be fixed up in the encoder->get_config 8844 * function. */ 8845 pipe_config->pixel_multiplier = 1; 8846 } 8847 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 8848 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 8849 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 8850 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 8851 } else { 8852 /* Mask out read-only status bits. */ 8853 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 8854 DPLL_PORTC_READY_MASK | 8855 DPLL_PORTB_READY_MASK); 8856 } 8857 8858 if (IS_CHERRYVIEW(dev_priv)) 8859 chv_crtc_clock_get(crtc, pipe_config); 8860 else if (IS_VALLEYVIEW(dev_priv)) 8861 vlv_crtc_clock_get(crtc, pipe_config); 8862 else 8863 i9xx_crtc_clock_get(crtc, pipe_config); 8864 8865 /* 8866 * Normally the dotclock is filled in by the encoder .get_config() 8867 * but in case the pipe is enabled w/o any ports we need a sane 8868 * default. 
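 * E.g. (illustrative numbers) an SDVO port clock of 54000 kHz with a
 * pixel_multiplier of 2 yields a 27000 kHz dotclock below.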
8869 */ 8870 pipe_config->base.adjusted_mode.crtc_clock = 8871 pipe_config->port_clock / pipe_config->pixel_multiplier; 8872 8873 ret = true; 8874 8875 out: 8876 intel_display_power_put(dev_priv, power_domain, wakeref); 8877 8878 return ret; 8879 } 8880 8881 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) 8882 { 8883 struct intel_encoder *encoder; 8884 int i; 8885 u32 val, final; 8886 bool has_lvds = false; 8887 bool has_cpu_edp = false; 8888 bool has_panel = false; 8889 bool has_ck505 = false; 8890 bool can_ssc = false; 8891 bool using_ssc_source = false; 8892 8893 /* We need to take the global config into account */ 8894 for_each_intel_encoder(&dev_priv->drm, encoder) { 8895 switch (encoder->type) { 8896 case INTEL_OUTPUT_LVDS: 8897 has_panel = true; 8898 has_lvds = true; 8899 break; 8900 case INTEL_OUTPUT_EDP: 8901 has_panel = true; 8902 if (encoder->port == PORT_A) 8903 has_cpu_edp = true; 8904 break; 8905 default: 8906 break; 8907 } 8908 } 8909 8910 if (HAS_PCH_IBX(dev_priv)) { 8911 has_ck505 = dev_priv->vbt.display_clock_mode; 8912 can_ssc = has_ck505; 8913 } else { 8914 has_ck505 = false; 8915 can_ssc = true; 8916 } 8917 8918 /* Check if any DPLLs are using the SSC source */ 8919 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 8920 u32 temp = I915_READ(PCH_DPLL(i)); 8921 8922 if (!(temp & DPLL_VCO_ENABLE)) 8923 continue; 8924 8925 if ((temp & PLL_REF_INPUT_MASK) == 8926 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 8927 using_ssc_source = true; 8928 break; 8929 } 8930 } 8931 8932 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 8933 has_panel, has_lvds, has_ck505, using_ssc_source); 8934 8935 /* Ironlake: try to setup display ref clock before DPLL 8936 * enabling. This is only under driver's control after 8937 * PCH B stepping, previous chipset stepping should be 8938 * ignoring this setting. 8939 */ 8940 val = I915_READ(PCH_DREF_CONTROL); 8941 8942 /* As we must carefully and slowly disable/enable each source in turn, 8943 * compute the final state we want first and check if we need to 8944 * make any changes at all. 
8945 */ 8946 final = val; 8947 final &= ~DREF_NONSPREAD_SOURCE_MASK; 8948 if (has_ck505) 8949 final |= DREF_NONSPREAD_CK505_ENABLE; 8950 else 8951 final |= DREF_NONSPREAD_SOURCE_ENABLE; 8952 8953 final &= ~DREF_SSC_SOURCE_MASK; 8954 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8955 final &= ~DREF_SSC1_ENABLE; 8956 8957 if (has_panel) { 8958 final |= DREF_SSC_SOURCE_ENABLE; 8959 8960 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8961 final |= DREF_SSC1_ENABLE; 8962 8963 if (has_cpu_edp) { 8964 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8965 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 8966 else 8967 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8968 } else 8969 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8970 } else if (using_ssc_source) { 8971 final |= DREF_SSC_SOURCE_ENABLE; 8972 final |= DREF_SSC1_ENABLE; 8973 } 8974 8975 if (final == val) 8976 return; 8977 8978 /* Always enable nonspread source */ 8979 val &= ~DREF_NONSPREAD_SOURCE_MASK; 8980 8981 if (has_ck505) 8982 val |= DREF_NONSPREAD_CK505_ENABLE; 8983 else 8984 val |= DREF_NONSPREAD_SOURCE_ENABLE; 8985 8986 if (has_panel) { 8987 val &= ~DREF_SSC_SOURCE_MASK; 8988 val |= DREF_SSC_SOURCE_ENABLE; 8989 8990 /* SSC must be turned on before enabling the CPU output */ 8991 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 8992 DRM_DEBUG_KMS("Using SSC on panel\n"); 8993 val |= DREF_SSC1_ENABLE; 8994 } else 8995 val &= ~DREF_SSC1_ENABLE; 8996 8997 /* Get SSC going before enabling the outputs */ 8998 I915_WRITE(PCH_DREF_CONTROL, val); 8999 POSTING_READ(PCH_DREF_CONTROL); 9000 udelay(200); 9001 9002 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9003 9004 /* Enable CPU source on CPU attached eDP */ 9005 if (has_cpu_edp) { 9006 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9007 DRM_DEBUG_KMS("Using SSC on eDP\n"); 9008 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9009 } else 9010 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9011 } else 9012 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9013 9014 I915_WRITE(PCH_DREF_CONTROL, val); 9015 POSTING_READ(PCH_DREF_CONTROL); 9016 udelay(200); 9017 } else { 9018 DRM_DEBUG_KMS("Disabling CPU source output\n"); 9019 9020 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9021 9022 /* Turn off CPU output */ 9023 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9024 9025 I915_WRITE(PCH_DREF_CONTROL, val); 9026 POSTING_READ(PCH_DREF_CONTROL); 9027 udelay(200); 9028 9029 if (!using_ssc_source) { 9030 DRM_DEBUG_KMS("Disabling SSC source\n"); 9031 9032 /* Turn off the SSC source */ 9033 val &= ~DREF_SSC_SOURCE_MASK; 9034 val |= DREF_SSC_SOURCE_DISABLE; 9035 9036 /* Turn off SSC1 */ 9037 val &= ~DREF_SSC1_ENABLE; 9038 9039 I915_WRITE(PCH_DREF_CONTROL, val); 9040 POSTING_READ(PCH_DREF_CONTROL); 9041 udelay(200); 9042 } 9043 } 9044 9045 BUG_ON(val != final); 9046 } 9047 9048 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 9049 { 9050 u32 tmp; 9051 9052 tmp = I915_READ(SOUTH_CHICKEN2); 9053 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 9054 I915_WRITE(SOUTH_CHICKEN2, tmp); 9055 9056 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & 9057 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 9058 DRM_ERROR("FDI mPHY reset assert timeout\n"); 9059 9060 tmp = I915_READ(SOUTH_CHICKEN2); 9061 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 9062 I915_WRITE(SOUTH_CHICKEN2, tmp); 9063 9064 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & 9065 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 9066 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 9067 } 9068 9069 /* WaMPhyProgramming:hsw */ 9070 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 9071 { 9072 u32 tmp; 9073 9074 tmp = intel_sbi_read(dev_priv, 
0x8008, SBI_MPHY); 9075 tmp &= ~(0xFF << 24); 9076 tmp |= (0x12 << 24); 9077 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 9078 9079 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 9080 tmp |= (1 << 11); 9081 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 9082 9083 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 9084 tmp |= (1 << 11); 9085 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 9086 9087 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 9088 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9089 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 9090 9091 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 9092 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9093 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 9094 9095 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 9096 tmp &= ~(7 << 13); 9097 tmp |= (5 << 13); 9098 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 9099 9100 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 9101 tmp &= ~(7 << 13); 9102 tmp |= (5 << 13); 9103 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 9104 9105 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 9106 tmp &= ~0xFF; 9107 tmp |= 0x1C; 9108 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 9109 9110 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 9111 tmp &= ~0xFF; 9112 tmp |= 0x1C; 9113 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 9114 9115 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 9116 tmp &= ~(0xFF << 16); 9117 tmp |= (0x1C << 16); 9118 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 9119 9120 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 9121 tmp &= ~(0xFF << 16); 9122 tmp |= (0x1C << 16); 9123 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 9124 9125 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 9126 tmp |= (1 << 27); 9127 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 9128 9129 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 9130 tmp |= (1 << 27); 9131 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 9132 9133 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 9134 tmp &= ~(0xF << 28); 9135 tmp |= (4 << 28); 9136 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 9137 9138 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 9139 tmp &= ~(0xF << 28); 9140 tmp |= (4 << 28); 9141 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 9142 } 9143 9144 /* Implements 3 different sequences from BSpec chapter "Display iCLK 9145 * Programming" based on the parameters passed: 9146 * - Sequence to enable CLKOUT_DP 9147 * - Sequence to enable CLKOUT_DP without spread 9148 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 9149 */ 9150 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 9151 bool with_spread, bool with_fdi) 9152 { 9153 u32 reg, tmp; 9154 9155 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 9156 with_spread = true; 9157 if (WARN(HAS_PCH_LPT_LP(dev_priv) && 9158 with_fdi, "LP PCH doesn't have FDI\n")) 9159 with_fdi = false; 9160 9161 mutex_lock(&dev_priv->sb_lock); 9162 9163 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9164 tmp &= ~SBI_SSCCTL_DISABLE; 9165 tmp |= SBI_SSCCTL_PATHALT; 9166 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9167 9168 udelay(24); 9169 9170 if (with_spread) { 9171 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9172 tmp &= ~SBI_SSCCTL_PATHALT; 9173 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9174 9175 if (with_fdi) { 9176 lpt_reset_fdi_mphy(dev_priv); 9177 lpt_program_fdi_mphy(dev_priv); 9178 } 9179 } 9180 9181 reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; 9182 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9183 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9184 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9185 9186 mutex_unlock(&dev_priv->sb_lock); 9187 } 9188 9189 /* Sequence to disable CLKOUT_DP */ 9190 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 9191 { 9192 u32 reg, tmp; 9193 9194 mutex_lock(&dev_priv->sb_lock); 9195 9196 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 9197 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9198 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9199 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9200 9201 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9202 if (!(tmp & SBI_SSCCTL_DISABLE)) { 9203 if (!(tmp & SBI_SSCCTL_PATHALT)) { 9204 tmp |= SBI_SSCCTL_PATHALT; 9205 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9206 udelay(32); 9207 } 9208 tmp |= SBI_SSCCTL_DISABLE; 9209 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9210 } 9211 9212 mutex_unlock(&dev_priv->sb_lock); 9213 } 9214 9215 #define BEND_IDX(steps) ((50 + (steps)) / 5) 9216 9217 static const u16 sscdivintphase[] = { 9218 [BEND_IDX( 50)] = 0x3B23, 9219 [BEND_IDX( 45)] = 0x3B23, 9220 [BEND_IDX( 40)] = 0x3C23, 9221 [BEND_IDX( 35)] = 0x3C23, 9222 [BEND_IDX( 30)] = 0x3D23, 9223 [BEND_IDX( 25)] = 0x3D23, 9224 [BEND_IDX( 20)] = 0x3E23, 9225 [BEND_IDX( 15)] = 0x3E23, 9226 [BEND_IDX( 10)] = 0x3F23, 9227 [BEND_IDX( 5)] = 0x3F23, 9228 [BEND_IDX( 0)] = 0x0025, 9229 [BEND_IDX( -5)] = 0x0025, 9230 [BEND_IDX(-10)] = 0x0125, 9231 [BEND_IDX(-15)] = 0x0125, 9232 [BEND_IDX(-20)] = 0x0225, 9233 [BEND_IDX(-25)] = 0x0225, 9234 [BEND_IDX(-30)] = 0x0325, 9235 [BEND_IDX(-35)] = 0x0325, 9236 [BEND_IDX(-40)] = 0x0425, 9237 [BEND_IDX(-45)] = 0x0425, 9238 [BEND_IDX(-50)] = 0x0525, 9239 }; 9240 9241 /* 9242 * Bend CLKOUT_DP 9243 * steps -50 to 50 inclusive, in steps of 5 9244 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 9245 * change in clock period = -(steps / 10) * 5.787 ps 9246 */ 9247 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 9248 { 9249 u32 tmp; 9250 int idx = BEND_IDX(steps); 9251 9252 if (WARN_ON(steps % 5 != 0)) 9253 return; 9254 9255 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) 9256 return; 9257 9258 mutex_lock(&dev_priv->sb_lock); 9259 9260 if (steps % 10 != 0) 9261 tmp = 0xAAAAAAAB; 9262 else 9263 tmp = 0x00000000; 9264 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 9265 9266 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 9267 tmp &= 0xffff0000; 9268 tmp |= sscdivintphase[idx]; 9269 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 9270 9271 mutex_unlock(&dev_priv->sb_lock); 9272 } 9273 9274 #undef BEND_IDX 9275 9276 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) 9277 { 9278 u32 fuse_strap = I915_READ(FUSE_STRAP); 9279 u32 ctl = I915_READ(SPLL_CTL); 9280 9281 if ((ctl & SPLL_PLL_ENABLE) == 0) 9282 return false; 9283 9284 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && 9285 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9286 return true; 9287 9288 if (IS_BROADWELL(dev_priv) && 9289 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) 9290 return true; 9291 9292 return false; 9293 } 9294 9295 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, 9296 enum intel_dpll_id id) 9297 { 9298 u32 fuse_strap = I915_READ(FUSE_STRAP); 9299 u32 ctl = I915_READ(WRPLL_CTL(id)); 9300 9301 if ((ctl & WRPLL_PLL_ENABLE) == 0) 9302 return false; 9303 9304 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) 9305 
return true; 9306 9307 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && 9308 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && 9309 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9310 return true; 9311 9312 return false; 9313 } 9314 9315 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 9316 { 9317 struct intel_encoder *encoder; 9318 bool has_fdi = false; 9319 9320 for_each_intel_encoder(&dev_priv->drm, encoder) { 9321 switch (encoder->type) { 9322 case INTEL_OUTPUT_ANALOG: 9323 has_fdi = true; 9324 break; 9325 default: 9326 break; 9327 } 9328 } 9329 9330 /* 9331 * The BIOS may have decided to use the PCH SSC 9332 * reference so we must not disable it until the 9333 * relevant PLLs have stopped relying on it. We'll 9334 * just leave the PCH SSC reference enabled in case 9335 * any active PLL is using it. It will get disabled 9336 * after runtime suspend if we don't have FDI. 9337 * 9338 * TODO: Move the whole reference clock handling 9339 * to the modeset sequence proper so that we can 9340 * actually enable/disable/reconfigure these things 9341 * safely. To do that we need to introduce a real 9342 * clock hierarchy. That would also allow us to do 9343 * clock bending finally. 9344 */ 9345 dev_priv->pch_ssc_use = 0; 9346 9347 if (spll_uses_pch_ssc(dev_priv)) { 9348 DRM_DEBUG_KMS("SPLL using PCH SSC\n"); 9349 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); 9350 } 9351 9352 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { 9353 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n"); 9354 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); 9355 } 9356 9357 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { 9358 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n"); 9359 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); 9360 } 9361 9362 if (dev_priv->pch_ssc_use) 9363 return; 9364 9365 if (has_fdi) { 9366 lpt_bend_clkout_dp(dev_priv, 0); 9367 lpt_enable_clkout_dp(dev_priv, true, true); 9368 } else { 9369 lpt_disable_clkout_dp(dev_priv); 9370 } 9371 } 9372 9373 /* 9374 * Initialize reference clocks when the driver loads 9375 */ 9376 void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 9377 { 9378 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 9379 ironlake_init_pch_refclk(dev_priv); 9380 else if (HAS_PCH_LPT(dev_priv)) 9381 lpt_init_pch_refclk(dev_priv); 9382 } 9383 9384 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) 9385 { 9386 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9387 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9388 enum pipe pipe = crtc->pipe; 9389 u32 val; 9390 9391 val = 0; 9392 9393 switch (crtc_state->pipe_bpp) { 9394 case 18: 9395 val |= PIPECONF_6BPC; 9396 break; 9397 case 24: 9398 val |= PIPECONF_8BPC; 9399 break; 9400 case 30: 9401 val |= PIPECONF_10BPC; 9402 break; 9403 case 36: 9404 val |= PIPECONF_12BPC; 9405 break; 9406 default: 9407 /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ 9408 BUG(); 9409 } 9410 9411 if (crtc_state->dither) 9412 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9413 9414 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9415 val |= PIPECONF_INTERLACED_ILK; 9416 else 9417 val |= PIPECONF_PROGRESSIVE; 9418 9419 if (crtc_state->limited_color_range) 9420 val |= PIPECONF_COLOR_RANGE_SELECT; 9421 9422 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 9423 9424 I915_WRITE(PIPECONF(pipe), val); 9425 POSTING_READ(PIPECONF(pipe)); 9426 } 9427 9428 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) 9429 { 9430 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9431 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9432 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 9433 u32 val = 0; 9434 9435 if (IS_HASWELL(dev_priv) && crtc_state->dither) 9436 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9437 9438 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9439 val |= PIPECONF_INTERLACED_ILK; 9440 else 9441 val |= PIPECONF_PROGRESSIVE; 9442 9443 I915_WRITE(PIPECONF(cpu_transcoder), val); 9444 POSTING_READ(PIPECONF(cpu_transcoder)); 9445 } 9446 9447 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) 9448 { 9449 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9450 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9451 u32 val = 0; 9452 9453 switch (crtc_state->pipe_bpp) { 9454 case 18: 9455 val |= PIPEMISC_DITHER_6_BPC; 9456 break; 9457 case 24: 9458 val |= PIPEMISC_DITHER_8_BPC; 9459 break; 9460 case 30: 9461 val |= PIPEMISC_DITHER_10_BPC; 9462 break; 9463 case 36: 9464 val |= PIPEMISC_DITHER_12_BPC; 9465 break; 9466 default: 9467 MISSING_CASE(crtc_state->pipe_bpp); 9468 break; 9469 } 9470 9471 if (crtc_state->dither) 9472 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 9473 9474 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 9475 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 9476 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; 9477 9478 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 9479 val |= PIPEMISC_YUV420_ENABLE | 9480 PIPEMISC_YUV420_MODE_FULL_BLEND; 9481 9482 if (INTEL_GEN(dev_priv) >= 11 && 9483 (crtc_state->active_planes & ~(icl_hdr_plane_mask() | 9484 BIT(PLANE_CURSOR))) == 0) 9485 val |= PIPEMISC_HDR_MODE_PRECISION; 9486 9487 I915_WRITE(PIPEMISC(crtc->pipe), val); 9488 } 9489 9490 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) 9491 { 9492 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9493 u32 tmp; 9494 9495 tmp = I915_READ(PIPEMISC(crtc->pipe)); 9496 9497 switch (tmp & PIPEMISC_DITHER_BPC_MASK) { 9498 case PIPEMISC_DITHER_6_BPC: 9499 return 18; 9500 case PIPEMISC_DITHER_8_BPC: 9501 return 24; 9502 case PIPEMISC_DITHER_10_BPC: 9503 return 30; 9504 case PIPEMISC_DITHER_12_BPC: 9505 return 36; 9506 default: 9507 MISSING_CASE(tmp); 9508 return 0; 9509 } 9510 } 9511 9512 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 9513 { 9514 /* 9515 * Account for spread spectrum to avoid 9516 * oversubscribing the link. Max center spread 9517 * is 2.5%; use 5% for safety's sake. 
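 * For example (illustrative numbers only): a 1080p60 stream with
 * target_clock = 148500 kHz at bpp = 24 gives
 * bps = 148500 * 24 * 21 / 20 = 3742200, so a 270000 kHz link
 * carries it with DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.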
 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

static void ironlake_compute_dpll(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9592 */ 9593 if (INTEL_INFO(dev_priv)->num_pipes == 3 && 9594 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 9595 dpll |= DPLL_SDVO_HIGH_SPEED; 9596 9597 /* compute bitmask from p1 value */ 9598 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 9599 /* also FPA1 */ 9600 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 9601 9602 switch (crtc_state->dpll.p2) { 9603 case 5: 9604 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 9605 break; 9606 case 7: 9607 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 9608 break; 9609 case 10: 9610 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 9611 break; 9612 case 14: 9613 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 9614 break; 9615 } 9616 9617 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 9618 intel_panel_use_ssc(dev_priv)) 9619 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 9620 else 9621 dpll |= PLL_REF_INPUT_DREFCLK; 9622 9623 dpll |= DPLL_VCO_ENABLE; 9624 9625 crtc_state->dpll_hw_state.dpll = dpll; 9626 crtc_state->dpll_hw_state.fp0 = fp; 9627 crtc_state->dpll_hw_state.fp1 = fp2; 9628 } 9629 9630 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, 9631 struct intel_crtc_state *crtc_state) 9632 { 9633 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9634 struct intel_atomic_state *state = 9635 to_intel_atomic_state(crtc_state->base.state); 9636 const struct intel_limit *limit; 9637 int refclk = 120000; 9638 9639 memset(&crtc_state->dpll_hw_state, 0, 9640 sizeof(crtc_state->dpll_hw_state)); 9641 9642 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 9643 if (!crtc_state->has_pch_encoder) 9644 return 0; 9645 9646 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9647 if (intel_panel_use_ssc(dev_priv)) { 9648 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 9649 dev_priv->vbt.lvds_ssc_freq); 9650 refclk = dev_priv->vbt.lvds_ssc_freq; 9651 } 9652 9653 if (intel_is_dual_link_lvds(dev_priv)) { 9654 if (refclk == 100000) 9655 limit = &intel_limits_ironlake_dual_lvds_100m; 9656 else 9657 limit = &intel_limits_ironlake_dual_lvds; 9658 } else { 9659 if (refclk == 100000) 9660 limit = &intel_limits_ironlake_single_lvds_100m; 9661 else 9662 limit = &intel_limits_ironlake_single_lvds; 9663 } 9664 } else { 9665 limit = &intel_limits_ironlake_dac; 9666 } 9667 9668 if (!crtc_state->clock_set && 9669 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9670 refclk, NULL, &crtc_state->dpll)) { 9671 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 9672 return -EINVAL; 9673 } 9674 9675 ironlake_compute_dpll(crtc, crtc_state, NULL); 9676 9677 if (!intel_reserve_shared_dplls(state, crtc, NULL)) { 9678 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", 9679 pipe_name(crtc->pipe)); 9680 return -EINVAL; 9681 } 9682 9683 return 0; 9684 } 9685 9686 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 9687 struct intel_link_m_n *m_n) 9688 { 9689 struct drm_device *dev = crtc->base.dev; 9690 struct drm_i915_private *dev_priv = to_i915(dev); 9691 enum pipe pipe = crtc->pipe; 9692 9693 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 9694 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 9695 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 9696 & ~TU_SIZE_MASK; 9697 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 9698 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 9699 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9700 } 9701 9702 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 9703 enum transcoder transcoder, 9704 struct intel_link_m_n *m_n, 9705 struct 
intel_link_m_n *m2_n2) 9706 { 9707 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9708 enum pipe pipe = crtc->pipe; 9709 9710 if (INTEL_GEN(dev_priv) >= 5) { 9711 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); 9712 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); 9713 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 9714 & ~TU_SIZE_MASK; 9715 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 9716 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 9717 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9718 9719 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { 9720 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 9721 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 9722 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 9723 & ~TU_SIZE_MASK; 9724 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 9725 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 9726 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9727 } 9728 } else { 9729 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 9730 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 9731 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 9732 & ~TU_SIZE_MASK; 9733 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 9734 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 9735 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9736 } 9737 } 9738 9739 void intel_dp_get_m_n(struct intel_crtc *crtc, 9740 struct intel_crtc_state *pipe_config) 9741 { 9742 if (pipe_config->has_pch_encoder) 9743 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 9744 else 9745 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9746 &pipe_config->dp_m_n, 9747 &pipe_config->dp_m2_n2); 9748 } 9749 9750 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 9751 struct intel_crtc_state *pipe_config) 9752 { 9753 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9754 &pipe_config->fdi_m_n, NULL); 9755 } 9756 9757 static void skylake_get_pfit_config(struct intel_crtc *crtc, 9758 struct intel_crtc_state *pipe_config) 9759 { 9760 struct drm_device *dev = crtc->base.dev; 9761 struct drm_i915_private *dev_priv = to_i915(dev); 9762 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 9763 u32 ps_ctrl = 0; 9764 int id = -1; 9765 int i; 9766 9767 /* find scaler attached to this pipe */ 9768 for (i = 0; i < crtc->num_scalers; i++) { 9769 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 9770 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 9771 id = i; 9772 pipe_config->pch_pfit.enabled = true; 9773 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); 9774 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); 9775 scaler_state->scalers[i].in_use = true; 9776 break; 9777 } 9778 } 9779 9780 scaler_state->scaler_id = id; 9781 if (id >= 0) { 9782 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 9783 } else { 9784 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 9785 } 9786 } 9787 9788 static void 9789 skylake_get_initial_plane_config(struct intel_crtc *crtc, 9790 struct intel_initial_plane_config *plane_config) 9791 { 9792 struct drm_device *dev = crtc->base.dev; 9793 struct drm_i915_private *dev_priv = to_i915(dev); 9794 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 9795 enum plane_id plane_id = plane->id; 9796 enum pipe pipe; 9797 u32 val, base, offset, stride_mult, tiling, alpha; 9798 int fourcc, pixel_format; 9799 unsigned int aligned_height; 9800 struct drm_framebuffer *fb; 9801 struct intel_framebuffer *intel_fb; 9802 9803 if 
(!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with
	 * Xrandr, while i915 HW rotation is clockwise; that's why the two
	 * are swapped here.
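	 * e.g. PLANE_CTL_ROTATE_90 read back from the hardware is
	 * reported as DRM_MODE_ROTATE_270 (and vice versa), while
	 * 0 and 180 map to themselves, as the switch below shows.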
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}

static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiate them) so just WARN about this case for now.
*/ 9932 if (IS_GEN(dev_priv, 7)) { 9933 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 9934 PF_PIPE_SEL_IVB(crtc->pipe)); 9935 } 9936 } 9937 } 9938 9939 static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 9940 struct intel_crtc_state *pipe_config) 9941 { 9942 struct drm_device *dev = crtc->base.dev; 9943 struct drm_i915_private *dev_priv = to_i915(dev); 9944 enum intel_display_power_domain power_domain; 9945 intel_wakeref_t wakeref; 9946 u32 tmp; 9947 bool ret; 9948 9949 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9950 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 9951 if (!wakeref) 9952 return false; 9953 9954 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 9955 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9956 pipe_config->shared_dpll = NULL; 9957 9958 ret = false; 9959 tmp = I915_READ(PIPECONF(crtc->pipe)); 9960 if (!(tmp & PIPECONF_ENABLE)) 9961 goto out; 9962 9963 switch (tmp & PIPECONF_BPC_MASK) { 9964 case PIPECONF_6BPC: 9965 pipe_config->pipe_bpp = 18; 9966 break; 9967 case PIPECONF_8BPC: 9968 pipe_config->pipe_bpp = 24; 9969 break; 9970 case PIPECONF_10BPC: 9971 pipe_config->pipe_bpp = 30; 9972 break; 9973 case PIPECONF_12BPC: 9974 pipe_config->pipe_bpp = 36; 9975 break; 9976 default: 9977 break; 9978 } 9979 9980 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 9981 pipe_config->limited_color_range = true; 9982 9983 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> 9984 PIPECONF_GAMMA_MODE_SHIFT; 9985 9986 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); 9987 9988 i9xx_get_pipe_color_config(pipe_config); 9989 intel_color_get_config(pipe_config); 9990 9991 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 9992 struct intel_shared_dpll *pll; 9993 enum intel_dpll_id pll_id; 9994 9995 pipe_config->has_pch_encoder = true; 9996 9997 tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); 9998 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 9999 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10000 10001 ironlake_get_fdi_m_n_config(crtc, pipe_config); 10002 10003 if (HAS_PCH_IBX(dev_priv)) { 10004 /* 10005 * The pipe->pch transcoder and pch transcoder->pll 10006 * mapping is fixed. 
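			 * (pipe A -> PCH transcoder A -> PCH DPLL A, and
			 * so on), so the DPLL id can be read straight off
			 * the pipe below.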
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
							&pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
	    INTEL_GEN(dev_priv) >= 11) {
		struct intel_encoder *encoder =
			intel_get_crtc_new_encoder(state, crtc_state);

		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
				      pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	return 0;
}

static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
				   enum port port,
				   struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		temp = I915_READ(ICL_DPCLKA_CFGCR0) &
		       ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		WARN(1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv, id);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}

static void
bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 10122 enum port port, 10123 struct intel_crtc_state *pipe_config) 10124 { 10125 enum intel_dpll_id id; 10126 10127 switch (port) { 10128 case PORT_A: 10129 id = DPLL_ID_SKL_DPLL0; 10130 break; 10131 case PORT_B: 10132 id = DPLL_ID_SKL_DPLL1; 10133 break; 10134 case PORT_C: 10135 id = DPLL_ID_SKL_DPLL2; 10136 break; 10137 default: 10138 DRM_ERROR("Incorrect port type\n"); 10139 return; 10140 } 10141 10142 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10143 } 10144 10145 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, 10146 enum port port, 10147 struct intel_crtc_state *pipe_config) 10148 { 10149 enum intel_dpll_id id; 10150 u32 temp; 10151 10152 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 10153 id = temp >> (port * 3 + 1); 10154 10155 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3)) 10156 return; 10157 10158 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10159 } 10160 10161 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 10162 enum port port, 10163 struct intel_crtc_state *pipe_config) 10164 { 10165 enum intel_dpll_id id; 10166 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 10167 10168 switch (ddi_pll_sel) { 10169 case PORT_CLK_SEL_WRPLL1: 10170 id = DPLL_ID_WRPLL1; 10171 break; 10172 case PORT_CLK_SEL_WRPLL2: 10173 id = DPLL_ID_WRPLL2; 10174 break; 10175 case PORT_CLK_SEL_SPLL: 10176 id = DPLL_ID_SPLL; 10177 break; 10178 case PORT_CLK_SEL_LCPLL_810: 10179 id = DPLL_ID_LCPLL_810; 10180 break; 10181 case PORT_CLK_SEL_LCPLL_1350: 10182 id = DPLL_ID_LCPLL_1350; 10183 break; 10184 case PORT_CLK_SEL_LCPLL_2700: 10185 id = DPLL_ID_LCPLL_2700; 10186 break; 10187 default: 10188 MISSING_CASE(ddi_pll_sel); 10189 /* fall through */ 10190 case PORT_CLK_SEL_NONE: 10191 return; 10192 } 10193 10194 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10195 } 10196 10197 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 10198 struct intel_crtc_state *pipe_config, 10199 u64 *power_domain_mask, 10200 intel_wakeref_t *wakerefs) 10201 { 10202 struct drm_device *dev = crtc->base.dev; 10203 struct drm_i915_private *dev_priv = to_i915(dev); 10204 enum intel_display_power_domain power_domain; 10205 unsigned long panel_transcoder_mask = 0; 10206 unsigned long enabled_panel_transcoders = 0; 10207 enum transcoder panel_transcoder; 10208 intel_wakeref_t wf; 10209 u32 tmp; 10210 10211 if (INTEL_GEN(dev_priv) >= 11) 10212 panel_transcoder_mask |= 10213 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 10214 10215 if (HAS_TRANSCODER_EDP(dev_priv)) 10216 panel_transcoder_mask |= BIT(TRANSCODER_EDP); 10217 10218 /* 10219 * The pipe->transcoder mapping is fixed with the exception of the eDP 10220 * and DSI transcoders handled below. 10221 */ 10222 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10223 10224 /* 10225 * XXX: Do intel_display_power_get_if_enabled before reading this (for 10226 * consistency and less surprising code; it's in always on power). 10227 */ 10228 for_each_set_bit(panel_transcoder, 10229 &panel_transcoder_mask, 10230 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) { 10231 bool force_thru = false; 10232 enum pipe trans_pipe; 10233 10234 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder)); 10235 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 10236 continue; 10237 10238 /* 10239 * Log all enabled ones, only use the first one. 10240 * 10241 * FIXME: This won't work for two separate DSI displays. 
10242 */ 10243 enabled_panel_transcoders |= BIT(panel_transcoder); 10244 if (enabled_panel_transcoders != BIT(panel_transcoder)) 10245 continue; 10246 10247 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 10248 default: 10249 WARN(1, "unknown pipe linked to transcoder %s\n", 10250 transcoder_name(panel_transcoder)); 10251 /* fall through */ 10252 case TRANS_DDI_EDP_INPUT_A_ONOFF: 10253 force_thru = true; 10254 /* fall through */ 10255 case TRANS_DDI_EDP_INPUT_A_ON: 10256 trans_pipe = PIPE_A; 10257 break; 10258 case TRANS_DDI_EDP_INPUT_B_ONOFF: 10259 trans_pipe = PIPE_B; 10260 break; 10261 case TRANS_DDI_EDP_INPUT_C_ONOFF: 10262 trans_pipe = PIPE_C; 10263 break; 10264 } 10265 10266 if (trans_pipe == crtc->pipe) { 10267 pipe_config->cpu_transcoder = panel_transcoder; 10268 pipe_config->pch_pfit.force_thru = force_thru; 10269 } 10270 } 10271 10272 /* 10273 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 10274 */ 10275 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && 10276 enabled_panel_transcoders != BIT(TRANSCODER_EDP)); 10277 10278 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 10279 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10280 10281 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10282 if (!wf) 10283 return false; 10284 10285 wakerefs[power_domain] = wf; 10286 *power_domain_mask |= BIT_ULL(power_domain); 10287 10288 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10289 10290 return tmp & PIPECONF_ENABLE; 10291 } 10292 10293 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 10294 struct intel_crtc_state *pipe_config, 10295 u64 *power_domain_mask, 10296 intel_wakeref_t *wakerefs) 10297 { 10298 struct drm_device *dev = crtc->base.dev; 10299 struct drm_i915_private *dev_priv = to_i915(dev); 10300 enum intel_display_power_domain power_domain; 10301 enum transcoder cpu_transcoder; 10302 intel_wakeref_t wf; 10303 enum port port; 10304 u32 tmp; 10305 10306 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 10307 if (port == PORT_A) 10308 cpu_transcoder = TRANSCODER_DSI_A; 10309 else 10310 cpu_transcoder = TRANSCODER_DSI_C; 10311 10312 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10313 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10314 10315 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10316 if (!wf) 10317 continue; 10318 10319 wakerefs[power_domain] = wf; 10320 *power_domain_mask |= BIT_ULL(power_domain); 10321 10322 /* 10323 * The PLL needs to be enabled with a valid divider 10324 * configuration, otherwise accessing DSI registers will hang 10325 * the machine. See BSpec North Display Engine 10326 * registers/MIPI[BXT]. We can break out here early, since we 10327 * need the same DSI PLL to be enabled for both DSI ports. 
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	if (INTEL_GEN(dev_priv) >= 12)
		port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	else
		port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);

	if (INTEL_GEN(dev_priv) >= 11)
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
							&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
10384 */ 10385 if (INTEL_GEN(dev_priv) < 9 && 10386 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 10387 pipe_config->has_pch_encoder = true; 10388 10389 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 10390 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10391 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10392 10393 ironlake_get_fdi_m_n_config(crtc, pipe_config); 10394 } 10395 } 10396 10397 static bool haswell_get_pipe_config(struct intel_crtc *crtc, 10398 struct intel_crtc_state *pipe_config) 10399 { 10400 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10401 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf; 10402 enum intel_display_power_domain power_domain; 10403 u64 power_domain_mask; 10404 bool active; 10405 10406 intel_crtc_init_scalers(crtc, pipe_config); 10407 10408 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10409 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10410 if (!wf) 10411 return false; 10412 10413 wakerefs[power_domain] = wf; 10414 power_domain_mask = BIT_ULL(power_domain); 10415 10416 pipe_config->shared_dpll = NULL; 10417 10418 active = hsw_get_transcoder_state(crtc, pipe_config, 10419 &power_domain_mask, wakerefs); 10420 10421 if (IS_GEN9_LP(dev_priv) && 10422 bxt_get_dsi_transcoder_state(crtc, pipe_config, 10423 &power_domain_mask, wakerefs)) { 10424 WARN_ON(active); 10425 active = true; 10426 } 10427 10428 if (!active) 10429 goto out; 10430 10431 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 10432 INTEL_GEN(dev_priv) >= 11) { 10433 haswell_get_ddi_port_state(crtc, pipe_config); 10434 intel_get_pipe_timings(crtc, pipe_config); 10435 } 10436 10437 intel_get_pipe_src_size(crtc, pipe_config); 10438 intel_get_crtc_ycbcr_config(crtc, pipe_config); 10439 10440 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)); 10441 10442 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); 10443 10444 if (INTEL_GEN(dev_priv) >= 9) { 10445 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe)); 10446 10447 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) 10448 pipe_config->gamma_enable = true; 10449 10450 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) 10451 pipe_config->csc_enable = true; 10452 } else { 10453 i9xx_get_pipe_color_config(pipe_config); 10454 } 10455 10456 intel_color_get_config(pipe_config); 10457 10458 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 10459 WARN_ON(power_domain_mask & BIT_ULL(power_domain)); 10460 10461 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10462 if (wf) { 10463 wakerefs[power_domain] = wf; 10464 power_domain_mask |= BIT_ULL(power_domain); 10465 10466 if (INTEL_GEN(dev_priv) >= 9) 10467 skylake_get_pfit_config(crtc, pipe_config); 10468 else 10469 ironlake_get_pfit_config(crtc, pipe_config); 10470 } 10471 10472 if (hsw_crtc_supports_ips(crtc)) { 10473 if (IS_HASWELL(dev_priv)) 10474 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE; 10475 else { 10476 /* 10477 * We cannot readout IPS state on broadwell, set to 10478 * true so we can set it to a defined state on first 10479 * commit. 
10480 */ 10481 pipe_config->ips_enabled = true; 10482 } 10483 } 10484 10485 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 10486 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 10487 pipe_config->pixel_multiplier = 10488 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 10489 } else { 10490 pipe_config->pixel_multiplier = 1; 10491 } 10492 10493 out: 10494 for_each_power_domain(power_domain, power_domain_mask) 10495 intel_display_power_put(dev_priv, 10496 power_domain, wakerefs[power_domain]); 10497 10498 return active; 10499 } 10500 10501 static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 10502 { 10503 struct drm_i915_private *dev_priv = 10504 to_i915(plane_state->base.plane->dev); 10505 const struct drm_framebuffer *fb = plane_state->base.fb; 10506 const struct drm_i915_gem_object *obj = intel_fb_obj(fb); 10507 u32 base; 10508 10509 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) 10510 base = obj->phys_handle->busaddr; 10511 else 10512 base = intel_plane_ggtt_offset(plane_state); 10513 10514 base += plane_state->color_plane[0].offset; 10515 10516 /* ILK+ do this automagically */ 10517 if (HAS_GMCH(dev_priv) && 10518 plane_state->base.rotation & DRM_MODE_ROTATE_180) 10519 base += (plane_state->base.crtc_h * 10520 plane_state->base.crtc_w - 1) * fb->format->cpp[0]; 10521 10522 return base; 10523 } 10524 10525 static u32 intel_cursor_position(const struct intel_plane_state *plane_state) 10526 { 10527 int x = plane_state->base.crtc_x; 10528 int y = plane_state->base.crtc_y; 10529 u32 pos = 0; 10530 10531 if (x < 0) { 10532 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 10533 x = -x; 10534 } 10535 pos |= x << CURSOR_X_SHIFT; 10536 10537 if (y < 0) { 10538 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 10539 y = -y; 10540 } 10541 pos |= y << CURSOR_Y_SHIFT; 10542 10543 return pos; 10544 } 10545 10546 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) 10547 { 10548 const struct drm_mode_config *config = 10549 &plane_state->base.plane->dev->mode_config; 10550 int width = plane_state->base.crtc_w; 10551 int height = plane_state->base.crtc_h; 10552 10553 return width > 0 && width <= config->cursor_width && 10554 height > 0 && height <= config->cursor_height; 10555 } 10556 10557 static int intel_cursor_check_surface(struct intel_plane_state *plane_state) 10558 { 10559 int src_x, src_y; 10560 u32 offset; 10561 int ret; 10562 10563 ret = intel_plane_compute_gtt(plane_state); 10564 if (ret) 10565 return ret; 10566 10567 if (!plane_state->base.visible) 10568 return 0; 10569 10570 src_x = plane_state->base.src_x >> 16; 10571 src_y = plane_state->base.src_y >> 16; 10572 10573 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 10574 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 10575 plane_state, 0); 10576 10577 if (src_x != 0 || src_y != 0) { 10578 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n"); 10579 return -EINVAL; 10580 } 10581 10582 plane_state->color_plane[0].offset = offset; 10583 10584 return 0; 10585 } 10586 10587 static int intel_check_cursor(struct intel_crtc_state *crtc_state, 10588 struct intel_plane_state *plane_state) 10589 { 10590 const struct drm_framebuffer *fb = plane_state->base.fb; 10591 int ret; 10592 10593 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { 10594 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 10595 return -EINVAL; 10596 } 10597 10598 ret = drm_atomic_helper_check_plane_state(&plane_state->base, 10599 &crtc_state->base, 10600 DRM_PLANE_HELPER_NO_SCALING, 10601 
DRM_PLANE_HELPER_NO_SCALING, 10602 true, true); 10603 if (ret) 10604 return ret; 10605 10606 ret = intel_cursor_check_surface(plane_state); 10607 if (ret) 10608 return ret; 10609 10610 if (!plane_state->base.visible) 10611 return 0; 10612 10613 ret = intel_plane_check_src_coordinates(plane_state); 10614 if (ret) 10615 return ret; 10616 10617 return 0; 10618 } 10619 10620 static unsigned int 10621 i845_cursor_max_stride(struct intel_plane *plane, 10622 u32 pixel_format, u64 modifier, 10623 unsigned int rotation) 10624 { 10625 return 2048; 10626 } 10627 10628 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 10629 { 10630 u32 cntl = 0; 10631 10632 if (crtc_state->gamma_enable) 10633 cntl |= CURSOR_GAMMA_ENABLE; 10634 10635 return cntl; 10636 } 10637 10638 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 10639 const struct intel_plane_state *plane_state) 10640 { 10641 return CURSOR_ENABLE | 10642 CURSOR_FORMAT_ARGB | 10643 CURSOR_STRIDE(plane_state->color_plane[0].stride); 10644 } 10645 10646 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 10647 { 10648 int width = plane_state->base.crtc_w; 10649 10650 /* 10651 * 845g/865g are only limited by the width of their cursors, 10652 * the height is arbitrary up to the precision of the register. 10653 */ 10654 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 10655 } 10656 10657 static int i845_check_cursor(struct intel_crtc_state *crtc_state, 10658 struct intel_plane_state *plane_state) 10659 { 10660 const struct drm_framebuffer *fb = plane_state->base.fb; 10661 int ret; 10662 10663 ret = intel_check_cursor(crtc_state, plane_state); 10664 if (ret) 10665 return ret; 10666 10667 /* if we want to turn off the cursor ignore width and height */ 10668 if (!fb) 10669 return 0; 10670 10671 /* Check for which cursor types we support */ 10672 if (!i845_cursor_size_ok(plane_state)) { 10673 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 10674 plane_state->base.crtc_w, 10675 plane_state->base.crtc_h); 10676 return -EINVAL; 10677 } 10678 10679 WARN_ON(plane_state->base.visible && 10680 plane_state->color_plane[0].stride != fb->pitches[0]); 10681 10682 switch (fb->pitches[0]) { 10683 case 256: 10684 case 512: 10685 case 1024: 10686 case 2048: 10687 break; 10688 default: 10689 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n", 10690 fb->pitches[0]); 10691 return -EINVAL; 10692 } 10693 10694 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 10695 10696 return 0; 10697 } 10698 10699 static void i845_update_cursor(struct intel_plane *plane, 10700 const struct intel_crtc_state *crtc_state, 10701 const struct intel_plane_state *plane_state) 10702 { 10703 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 10704 u32 cntl = 0, base = 0, pos = 0, size = 0; 10705 unsigned long irqflags; 10706 10707 if (plane_state && plane_state->base.visible) { 10708 unsigned int width = plane_state->base.crtc_w; 10709 unsigned int height = plane_state->base.crtc_h; 10710 10711 cntl = plane_state->ctl | 10712 i845_cursor_ctl_crtc(crtc_state); 10713 10714 size = (height << 12) | width; 10715 10716 base = intel_cursor_base(plane_state); 10717 pos = intel_cursor_position(plane_state); 10718 } 10719 10720 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 10721 10722 /* On these chipsets we can only modify the base/size/stride 10723 * whilst the cursor is disabled. 
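	 * That is why the branch below first writes CURCNTR = 0 to
	 * disable the cursor, reprograms CURBASE/CURSIZE, and only
	 * then re-enables it with the final control value.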
10724 */ 10725 if (plane->cursor.base != base || 10726 plane->cursor.size != size || 10727 plane->cursor.cntl != cntl) { 10728 I915_WRITE_FW(CURCNTR(PIPE_A), 0); 10729 I915_WRITE_FW(CURBASE(PIPE_A), base); 10730 I915_WRITE_FW(CURSIZE, size); 10731 I915_WRITE_FW(CURPOS(PIPE_A), pos); 10732 I915_WRITE_FW(CURCNTR(PIPE_A), cntl); 10733 10734 plane->cursor.base = base; 10735 plane->cursor.size = size; 10736 plane->cursor.cntl = cntl; 10737 } else { 10738 I915_WRITE_FW(CURPOS(PIPE_A), pos); 10739 } 10740 10741 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 10742 } 10743 10744 static void i845_disable_cursor(struct intel_plane *plane, 10745 const struct intel_crtc_state *crtc_state) 10746 { 10747 i845_update_cursor(plane, crtc_state, NULL); 10748 } 10749 10750 static bool i845_cursor_get_hw_state(struct intel_plane *plane, 10751 enum pipe *pipe) 10752 { 10753 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 10754 enum intel_display_power_domain power_domain; 10755 intel_wakeref_t wakeref; 10756 bool ret; 10757 10758 power_domain = POWER_DOMAIN_PIPE(PIPE_A); 10759 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 10760 if (!wakeref) 10761 return false; 10762 10763 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 10764 10765 *pipe = PIPE_A; 10766 10767 intel_display_power_put(dev_priv, power_domain, wakeref); 10768 10769 return ret; 10770 } 10771 10772 static unsigned int 10773 i9xx_cursor_max_stride(struct intel_plane *plane, 10774 u32 pixel_format, u64 modifier, 10775 unsigned int rotation) 10776 { 10777 return plane->base.dev->mode_config.cursor_width * 4; 10778 } 10779 10780 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 10781 { 10782 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 10783 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10784 u32 cntl = 0; 10785 10786 if (INTEL_GEN(dev_priv) >= 11) 10787 return cntl; 10788 10789 if (crtc_state->gamma_enable) 10790 cntl = MCURSOR_GAMMA_ENABLE; 10791 10792 if (crtc_state->csc_enable) 10793 cntl |= MCURSOR_PIPE_CSC_ENABLE; 10794 10795 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 10796 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 10797 10798 return cntl; 10799 } 10800 10801 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 10802 const struct intel_plane_state *plane_state) 10803 { 10804 struct drm_i915_private *dev_priv = 10805 to_i915(plane_state->base.plane->dev); 10806 u32 cntl = 0; 10807 10808 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 10809 cntl |= MCURSOR_TRICKLE_FEED_DISABLE; 10810 10811 switch (plane_state->base.crtc_w) { 10812 case 64: 10813 cntl |= MCURSOR_MODE_64_ARGB_AX; 10814 break; 10815 case 128: 10816 cntl |= MCURSOR_MODE_128_ARGB_AX; 10817 break; 10818 case 256: 10819 cntl |= MCURSOR_MODE_256_ARGB_AX; 10820 break; 10821 default: 10822 MISSING_CASE(plane_state->base.crtc_w); 10823 return 0; 10824 } 10825 10826 if (plane_state->base.rotation & DRM_MODE_ROTATE_180) 10827 cntl |= MCURSOR_ROTATE_180; 10828 10829 return cntl; 10830 } 10831 10832 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) 10833 { 10834 struct drm_i915_private *dev_priv = 10835 to_i915(plane_state->base.plane->dev); 10836 int width = plane_state->base.crtc_w; 10837 int height = plane_state->base.crtc_h; 10838 10839 if (!intel_cursor_size_ok(plane_state)) 10840 return false; 10841 10842 /* Cursor width is limited to a few power-of-two sizes */ 10843 switch (width) { 10844 case 256: 10845 case 128: 10846 case 64: 
		break;
	default:
		return false;
	}

	/*
	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
	 * height from 8 lines up to the cursor width, when the
	 * cursor is not rotated. Everything else requires square
	 * cursors.
	 */
	if (HAS_CUR_FBC(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_0) {
		if (height < 8 || height > width)
			return false;
	} else {
		if (height != width)
			return false;
	}

	return true;
}

static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}

static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
10950 * Without the CURCNTR write the CURPOS write would
10951 * arm itself. Thus we always update CURCNTR before
10952 * CURPOS.
10953 *
10954 * On other platforms CURPOS always requires the
10955 * CURBASE write to arm the update. Additionally
10956 * a write to any of the cursor registers will cancel
10957 * an already armed cursor update. Thus leaving out
10958 * the CURBASE write after CURPOS could lead to a
10959 * cursor that doesn't appear to move, or even change
10960 * shape. Thus we always write CURBASE.
10961 *
10962 * The other registers are armed by the CURBASE write
10963 * except when the plane is getting enabled at which time
10964 * the CURCNTR write arms the update.
10965 */
10966
10967 if (INTEL_GEN(dev_priv) >= 9)
10968 skl_write_cursor_wm(plane, crtc_state);
10969
10970 if (plane->cursor.base != base ||
10971 plane->cursor.size != fbc_ctl ||
10972 plane->cursor.cntl != cntl) {
10973 if (HAS_CUR_FBC(dev_priv))
10974 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
10975 I915_WRITE_FW(CURCNTR(pipe), cntl);
10976 I915_WRITE_FW(CURPOS(pipe), pos);
10977 I915_WRITE_FW(CURBASE(pipe), base);
10978
10979 plane->cursor.base = base;
10980 plane->cursor.size = fbc_ctl;
10981 plane->cursor.cntl = cntl;
10982 } else {
10983 I915_WRITE_FW(CURPOS(pipe), pos);
10984 I915_WRITE_FW(CURBASE(pipe), base);
10985 }
10986
10987 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10988 }
10989
10990 static void i9xx_disable_cursor(struct intel_plane *plane,
10991 const struct intel_crtc_state *crtc_state)
10992 {
10993 i9xx_update_cursor(plane, crtc_state, NULL);
10994 }
10995
10996 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10997 enum pipe *pipe)
10998 {
10999 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11000 enum intel_display_power_domain power_domain;
11001 intel_wakeref_t wakeref;
11002 bool ret;
11003 u32 val;
11004
11005 /*
11006 * Not 100% correct for planes that can move between pipes,
11007 * but that's only the case for gen2-3 which don't have any
11008 * display power wells.
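* On those older platforms the pipe the cursor is actually
* bound to is read back from the CURCNTR pipe select bits
* below, rather than being assumed from plane->pipe.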
11009 */ 11010 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 11011 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11012 if (!wakeref) 11013 return false; 11014 11015 val = I915_READ(CURCNTR(plane->pipe)); 11016 11017 ret = val & MCURSOR_MODE; 11018 11019 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 11020 *pipe = plane->pipe; 11021 else 11022 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> 11023 MCURSOR_PIPE_SELECT_SHIFT; 11024 11025 intel_display_power_put(dev_priv, power_domain, wakeref); 11026 11027 return ret; 11028 } 11029 11030 /* VESA 640x480x72Hz mode to set on the pipe */ 11031 static const struct drm_display_mode load_detect_mode = { 11032 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 11033 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 11034 }; 11035 11036 struct drm_framebuffer * 11037 intel_framebuffer_create(struct drm_i915_gem_object *obj, 11038 struct drm_mode_fb_cmd2 *mode_cmd) 11039 { 11040 struct intel_framebuffer *intel_fb; 11041 int ret; 11042 11043 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 11044 if (!intel_fb) 11045 return ERR_PTR(-ENOMEM); 11046 11047 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); 11048 if (ret) 11049 goto err; 11050 11051 return &intel_fb->base; 11052 11053 err: 11054 kfree(intel_fb); 11055 return ERR_PTR(ret); 11056 } 11057 11058 static int intel_modeset_disable_planes(struct drm_atomic_state *state, 11059 struct drm_crtc *crtc) 11060 { 11061 struct drm_plane *plane; 11062 struct drm_plane_state *plane_state; 11063 int ret, i; 11064 11065 ret = drm_atomic_add_affected_planes(state, crtc); 11066 if (ret) 11067 return ret; 11068 11069 for_each_new_plane_in_state(state, plane, plane_state, i) { 11070 if (plane_state->crtc != crtc) 11071 continue; 11072 11073 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 11074 if (ret) 11075 return ret; 11076 11077 drm_atomic_set_fb_for_plane(plane_state, NULL); 11078 } 11079 11080 return 0; 11081 } 11082 11083 int intel_get_load_detect_pipe(struct drm_connector *connector, 11084 const struct drm_display_mode *mode, 11085 struct intel_load_detect_pipe *old, 11086 struct drm_modeset_acquire_ctx *ctx) 11087 { 11088 struct intel_crtc *intel_crtc; 11089 struct intel_encoder *intel_encoder = 11090 intel_attached_encoder(connector); 11091 struct drm_crtc *possible_crtc; 11092 struct drm_encoder *encoder = &intel_encoder->base; 11093 struct drm_crtc *crtc = NULL; 11094 struct drm_device *dev = encoder->dev; 11095 struct drm_i915_private *dev_priv = to_i915(dev); 11096 struct drm_mode_config *config = &dev->mode_config; 11097 struct drm_atomic_state *state = NULL, *restore_state = NULL; 11098 struct drm_connector_state *connector_state; 11099 struct intel_crtc_state *crtc_state; 11100 int ret, i = -1; 11101 11102 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11103 connector->base.id, connector->name, 11104 encoder->base.id, encoder->name); 11105 11106 old->restore_state = NULL; 11107 11108 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 11109 11110 /* 11111 * Algorithm gets a little messy: 11112 * 11113 * - if the connector already has an assigned crtc, use it (but make 11114 * sure it's on first) 11115 * 11116 * - try to find the first unused crtc that can drive this connector, 11117 * and use that if we find one 11118 */ 11119 11120 /* See if we already have a CRTC for this connector */ 11121 if (connector->state->crtc) { 11122 crtc = connector->state->crtc; 11123 11124 ret = drm_modeset_lock(&crtc->mutex, 
ctx); 11125 if (ret) 11126 goto fail; 11127 11128 /* Make sure the crtc and connector are running */ 11129 goto found; 11130 } 11131 11132 /* Find an unused one (if possible) */ 11133 for_each_crtc(dev, possible_crtc) { 11134 i++; 11135 if (!(encoder->possible_crtcs & (1 << i))) 11136 continue; 11137 11138 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 11139 if (ret) 11140 goto fail; 11141 11142 if (possible_crtc->state->enable) { 11143 drm_modeset_unlock(&possible_crtc->mutex); 11144 continue; 11145 } 11146 11147 crtc = possible_crtc; 11148 break; 11149 } 11150 11151 /* 11152 * If we didn't find an unused CRTC, don't use any. 11153 */ 11154 if (!crtc) { 11155 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 11156 ret = -ENODEV; 11157 goto fail; 11158 } 11159 11160 found: 11161 intel_crtc = to_intel_crtc(crtc); 11162 11163 state = drm_atomic_state_alloc(dev); 11164 restore_state = drm_atomic_state_alloc(dev); 11165 if (!state || !restore_state) { 11166 ret = -ENOMEM; 11167 goto fail; 11168 } 11169 11170 state->acquire_ctx = ctx; 11171 restore_state->acquire_ctx = ctx; 11172 11173 connector_state = drm_atomic_get_connector_state(state, connector); 11174 if (IS_ERR(connector_state)) { 11175 ret = PTR_ERR(connector_state); 11176 goto fail; 11177 } 11178 11179 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 11180 if (ret) 11181 goto fail; 11182 11183 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 11184 if (IS_ERR(crtc_state)) { 11185 ret = PTR_ERR(crtc_state); 11186 goto fail; 11187 } 11188 11189 crtc_state->base.active = crtc_state->base.enable = true; 11190 11191 if (!mode) 11192 mode = &load_detect_mode; 11193 11194 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); 11195 if (ret) 11196 goto fail; 11197 11198 ret = intel_modeset_disable_planes(state, crtc); 11199 if (ret) 11200 goto fail; 11201 11202 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 11203 if (!ret) 11204 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 11205 if (!ret) 11206 ret = drm_atomic_add_affected_planes(restore_state, crtc); 11207 if (ret) { 11208 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); 11209 goto fail; 11210 } 11211 11212 ret = drm_atomic_commit(state); 11213 if (ret) { 11214 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 11215 goto fail; 11216 } 11217 11218 old->restore_state = restore_state; 11219 drm_atomic_state_put(state); 11220 11221 /* let the connector get through one full cycle before testing */ 11222 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 11223 return true; 11224 11225 fail: 11226 if (state) { 11227 drm_atomic_state_put(state); 11228 state = NULL; 11229 } 11230 if (restore_state) { 11231 drm_atomic_state_put(restore_state); 11232 restore_state = NULL; 11233 } 11234 11235 if (ret == -EDEADLK) 11236 return ret; 11237 11238 return false; 11239 } 11240 11241 void intel_release_load_detect_pipe(struct drm_connector *connector, 11242 struct intel_load_detect_pipe *old, 11243 struct drm_modeset_acquire_ctx *ctx) 11244 { 11245 struct intel_encoder *intel_encoder = 11246 intel_attached_encoder(connector); 11247 struct drm_encoder *encoder = &intel_encoder->base; 11248 struct drm_atomic_state *state = old->restore_state; 11249 int ret; 11250 11251 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11252 connector->base.id, connector->name, 11253 encoder->base.id, encoder->name); 11254 11255 if (!state) 11256 return; 11257 11258 ret = 
drm_atomic_helper_commit_duplicated_state(state, ctx); 11259 if (ret) 11260 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret); 11261 drm_atomic_state_put(state); 11262 } 11263 11264 static int i9xx_pll_refclk(struct drm_device *dev, 11265 const struct intel_crtc_state *pipe_config) 11266 { 11267 struct drm_i915_private *dev_priv = to_i915(dev); 11268 u32 dpll = pipe_config->dpll_hw_state.dpll; 11269 11270 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 11271 return dev_priv->vbt.lvds_ssc_freq; 11272 else if (HAS_PCH_SPLIT(dev_priv)) 11273 return 120000; 11274 else if (!IS_GEN(dev_priv, 2)) 11275 return 96000; 11276 else 11277 return 48000; 11278 } 11279 11280 /* Returns the clock of the currently programmed mode of the given pipe. */ 11281 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 11282 struct intel_crtc_state *pipe_config) 11283 { 11284 struct drm_device *dev = crtc->base.dev; 11285 struct drm_i915_private *dev_priv = to_i915(dev); 11286 int pipe = pipe_config->cpu_transcoder; 11287 u32 dpll = pipe_config->dpll_hw_state.dpll; 11288 u32 fp; 11289 struct dpll clock; 11290 int port_clock; 11291 int refclk = i9xx_pll_refclk(dev, pipe_config); 11292 11293 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 11294 fp = pipe_config->dpll_hw_state.fp0; 11295 else 11296 fp = pipe_config->dpll_hw_state.fp1; 11297 11298 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 11299 if (IS_PINEVIEW(dev_priv)) { 11300 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 11301 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 11302 } else { 11303 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 11304 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 11305 } 11306 11307 if (!IS_GEN(dev_priv, 2)) { 11308 if (IS_PINEVIEW(dev_priv)) 11309 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 11310 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 11311 else 11312 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 11313 DPLL_FPA01_P1_POST_DIV_SHIFT); 11314 11315 switch (dpll & DPLL_MODE_MASK) { 11316 case DPLLB_MODE_DAC_SERIAL: 11317 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 11318 5 : 10; 11319 break; 11320 case DPLLB_MODE_LVDS: 11321 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 11322 7 : 14; 11323 break; 11324 default: 11325 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 11326 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 11327 return; 11328 } 11329 11330 if (IS_PINEVIEW(dev_priv)) 11331 port_clock = pnv_calc_dpll_params(refclk, &clock); 11332 else 11333 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11334 } else { 11335 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS); 11336 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 11337 11338 if (is_lvds) { 11339 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 11340 DPLL_FPA01_P1_POST_DIV_SHIFT); 11341 11342 if (lvds & LVDS_CLKB_POWER_UP) 11343 clock.p2 = 7; 11344 else 11345 clock.p2 = 14; 11346 } else { 11347 if (dpll & PLL_P1_DIVIDE_BY_TWO) 11348 clock.p1 = 2; 11349 else { 11350 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 11351 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 11352 } 11353 if (dpll & PLL_P2_DIVIDE_BY_4) 11354 clock.p2 = 4; 11355 else 11356 clock.p2 = 2; 11357 } 11358 11359 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11360 } 11361 11362 /* 11363 * This value includes pixel_multiplier. We will use 11364 * port_clock to compute adjusted_mode.crtc_clock in the 11365 * encoder's get_config() function. 
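* For example SDVO runs the port at a multiple of the dot clock
* for low resolution modes, and the encoder's get_config() is
* expected to divide that pixel multiplier back out.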
11366 */
11367 pipe_config->port_clock = port_clock;
11368 }
11369
11370 int intel_dotclock_calculate(int link_freq,
11371 const struct intel_link_m_n *m_n)
11372 {
11373 /*
11374 * The calculation for the data clock is:
11375 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11376 * But we want to avoid losing precision if possible, so:
11377 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11378 *
11379 * and the link clock is simpler:
11380 * link_clock = (m * link_clock) / n
11381 */
11382
11383 if (!m_n->link_n)
11384 return 0;
11385
11386 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11387 }
11388
11389 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11390 struct intel_crtc_state *pipe_config)
11391 {
11392 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11393
11394 /* read out port_clock from the DPLL */
11395 i9xx_crtc_clock_get(crtc, pipe_config);
11396
11397 /*
11398 * In case there is an active pipe without active ports,
11399 * we may need some idea for the dotclock anyway.
11400 * Calculate one based on the FDI configuration.
11401 */
11402 pipe_config->base.adjusted_mode.crtc_clock =
11403 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11404 &pipe_config->fdi_m_n);
11405 }
11406
11407 /* Returns the currently programmed mode of the given encoder. */
11408 struct drm_display_mode *
11409 intel_encoder_current_mode(struct intel_encoder *encoder)
11410 {
11411 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11412 struct intel_crtc_state *crtc_state;
11413 struct drm_display_mode *mode;
11414 struct intel_crtc *crtc;
11415 enum pipe pipe;
11416
11417 if (!encoder->get_hw_state(encoder, &pipe))
11418 return NULL;
11419
11420 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11421
11422 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11423 if (!mode)
11424 return NULL;
11425
11426 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11427 if (!crtc_state) {
11428 kfree(mode);
11429 return NULL;
11430 }
11431
11432 crtc_state->base.crtc = &crtc->base;
11433
11434 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11435 kfree(crtc_state);
11436 kfree(mode);
11437 return NULL;
11438 }
11439
11440 encoder->get_config(encoder, crtc_state);
11441
11442 intel_mode_from_pipe_config(mode, crtc_state);
11443
11444 kfree(crtc_state);
11445
11446 return mode;
11447 }
11448
11449 static void intel_crtc_destroy(struct drm_crtc *crtc)
11450 {
11451 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11452
11453 drm_crtc_cleanup(crtc);
11454 kfree(intel_crtc);
11455 }
11456
11457 /**
11458 * intel_wm_need_update - Check whether watermarks need updating
11459 * @cur: current plane state
11460 * @new: new plane state
11461 *
11462 * Check current plane state versus the new one to determine whether
11463 * watermarks need to be recalculated.
11464 *
11465 * Returns true or false.
11466 */
11467 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11468 struct intel_plane_state *new)
11469 {
11470 /* Update watermarks on tiling or size changes.
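* A visibility change always warrants an update; beyond that the
* checks below treat a different fb modifier (tiling), rotation,
* or src/dst size as reason to recompute the watermarks.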
*/ 11471 if (new->base.visible != cur->base.visible) 11472 return true; 11473 11474 if (!cur->base.fb || !new->base.fb) 11475 return false; 11476 11477 if (cur->base.fb->modifier != new->base.fb->modifier || 11478 cur->base.rotation != new->base.rotation || 11479 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) || 11480 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) || 11481 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) || 11482 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst)) 11483 return true; 11484 11485 return false; 11486 } 11487 11488 static bool needs_scaling(const struct intel_plane_state *state) 11489 { 11490 int src_w = drm_rect_width(&state->base.src) >> 16; 11491 int src_h = drm_rect_height(&state->base.src) >> 16; 11492 int dst_w = drm_rect_width(&state->base.dst); 11493 int dst_h = drm_rect_height(&state->base.dst); 11494 11495 return (src_w != dst_w || src_h != dst_h); 11496 } 11497 11498 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, 11499 struct intel_crtc_state *crtc_state, 11500 const struct intel_plane_state *old_plane_state, 11501 struct intel_plane_state *plane_state) 11502 { 11503 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 11504 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 11505 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11506 bool mode_changed = needs_modeset(crtc_state); 11507 bool was_crtc_enabled = old_crtc_state->base.active; 11508 bool is_crtc_enabled = crtc_state->base.active; 11509 bool turn_off, turn_on, visible, was_visible; 11510 struct drm_framebuffer *fb = plane_state->base.fb; 11511 int ret; 11512 11513 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { 11514 ret = skl_update_scaler_plane(crtc_state, plane_state); 11515 if (ret) 11516 return ret; 11517 } 11518 11519 was_visible = old_plane_state->base.visible; 11520 visible = plane_state->base.visible; 11521 11522 if (!was_crtc_enabled && WARN_ON(was_visible)) 11523 was_visible = false; 11524 11525 /* 11526 * Visibility is calculated as if the crtc was on, but 11527 * after scaler setup everything depends on it being off 11528 * when the crtc isn't active. 11529 * 11530 * FIXME this is wrong for watermarks. Watermarks should also 11531 * be computed as if the pipe would be active. Perhaps move 11532 * per-plane wm computation to the .check_plane() hook, and 11533 * only combine the results from all planes in the current place? 11534 */ 11535 if (!is_crtc_enabled) { 11536 plane_state->base.visible = visible = false; 11537 crtc_state->active_planes &= ~BIT(plane->id); 11538 crtc_state->data_rate[plane->id] = 0; 11539 } 11540 11541 if (!was_visible && !visible) 11542 return 0; 11543 11544 if (fb != old_plane_state->base.fb) 11545 crtc_state->fb_changed = true; 11546 11547 turn_off = was_visible && (!visible || mode_changed); 11548 turn_on = visible && (!was_visible || mode_changed); 11549 11550 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", 11551 crtc->base.base.id, crtc->base.name, 11552 plane->base.base.id, plane->base.name, 11553 fb ? 
fb->base.id : -1);
11554
11555 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11556 plane->base.base.id, plane->base.name,
11557 was_visible, visible,
11558 turn_off, turn_on, mode_changed);
11559
11560 if (turn_on) {
11561 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11562 crtc_state->update_wm_pre = true;
11563
11564 /* must disable cxsr around plane enable/disable */
11565 if (plane->id != PLANE_CURSOR)
11566 crtc_state->disable_cxsr = true;
11567 } else if (turn_off) {
11568 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11569 crtc_state->update_wm_post = true;
11570
11571 /* must disable cxsr around plane enable/disable */
11572 if (plane->id != PLANE_CURSOR)
11573 crtc_state->disable_cxsr = true;
11574 } else if (intel_wm_need_update(old_plane_state, plane_state)) {
11575 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
11576 /* FIXME bollocks */
11577 crtc_state->update_wm_pre = true;
11578 crtc_state->update_wm_post = true;
11579 }
11580 }
11581
11582 if (visible || was_visible)
11583 crtc_state->fb_bits |= plane->frontbuffer_bit;
11584
11585 /*
11586 * ILK/SNB DVSACNTR/Sprite Enable
11587 * IVB SPR_CTL/Sprite Enable
11588 * "When in Self Refresh Big FIFO mode, a write to enable the
11589 * plane will be internally buffered and delayed while Big FIFO
11590 * mode is exiting."
11591 *
11592 * Which means that enabling the sprite can take an extra frame
11593 * when we start in big FIFO mode (LP1+). Thus we need to drop
11594 * down to LP0 and wait for vblank in order to make sure the
11595 * sprite gets enabled on the next vblank after the register write.
11596 * Doing otherwise would risk enabling the sprite one frame after
11597 * we've already signalled flip completion. We can resume LP1+
11598 * once the sprite has been enabled.
11599 *
11600 *
11601 * WaCxSRDisabledForSpriteScaling:ivb
11602 * IVB SPR_SCALE/Scaling Enable
11603 * "Low Power watermarks must be disabled for at least one
11604 * frame before enabling sprite scaling, and kept disabled
11605 * until sprite scaling is disabled."
11606 *
11607 * ILK/SNB DVSASCALE/Scaling Enable
11608 * "When in Self Refresh Big FIFO mode, scaling enable will be
11609 * masked off while Big FIFO mode is exiting."
11610 *
11611 * Despite the w/a only being listed for IVB we assume that
11612 * the ILK/SNB note has similar ramifications, hence we apply
11613 * the w/a on all three platforms.
11614 *
11615 * Experimental results suggest this is needed for the primary
11616 * plane as well, not only the sprite plane.
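* Hence the check below: LP watermarks get dropped when a plane
* is being turned on, or when scaling is being enabled on a
* plane that wasn't scaling before.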
11617 */ 11618 if (plane->id != PLANE_CURSOR && 11619 (IS_GEN_RANGE(dev_priv, 5, 6) || 11620 IS_IVYBRIDGE(dev_priv)) && 11621 (turn_on || (!needs_scaling(old_plane_state) && 11622 needs_scaling(plane_state)))) 11623 crtc_state->disable_lp_wm = true; 11624 11625 return 0; 11626 } 11627 11628 static bool encoders_cloneable(const struct intel_encoder *a, 11629 const struct intel_encoder *b) 11630 { 11631 /* masks could be asymmetric, so check both ways */ 11632 return a == b || (a->cloneable & (1 << b->type) && 11633 b->cloneable & (1 << a->type)); 11634 } 11635 11636 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 11637 struct intel_crtc *crtc, 11638 struct intel_encoder *encoder) 11639 { 11640 struct intel_encoder *source_encoder; 11641 struct drm_connector *connector; 11642 struct drm_connector_state *connector_state; 11643 int i; 11644 11645 for_each_new_connector_in_state(state, connector, connector_state, i) { 11646 if (connector_state->crtc != &crtc->base) 11647 continue; 11648 11649 source_encoder = 11650 to_intel_encoder(connector_state->best_encoder); 11651 if (!encoders_cloneable(encoder, source_encoder)) 11652 return false; 11653 } 11654 11655 return true; 11656 } 11657 11658 static int icl_add_linked_planes(struct intel_atomic_state *state) 11659 { 11660 struct intel_plane *plane, *linked; 11661 struct intel_plane_state *plane_state, *linked_plane_state; 11662 int i; 11663 11664 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11665 linked = plane_state->linked_plane; 11666 11667 if (!linked) 11668 continue; 11669 11670 linked_plane_state = intel_atomic_get_plane_state(state, linked); 11671 if (IS_ERR(linked_plane_state)) 11672 return PTR_ERR(linked_plane_state); 11673 11674 WARN_ON(linked_plane_state->linked_plane != plane); 11675 WARN_ON(linked_plane_state->slave == plane_state->slave); 11676 } 11677 11678 return 0; 11679 } 11680 11681 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) 11682 { 11683 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 11684 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11685 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); 11686 struct intel_plane *plane, *linked; 11687 struct intel_plane_state *plane_state; 11688 int i; 11689 11690 if (INTEL_GEN(dev_priv) < 11) 11691 return 0; 11692 11693 /* 11694 * Destroy all old plane links and make the slave plane invisible 11695 * in the crtc_state->active_planes mask. 
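* The loop below tears down every existing link on this pipe;
* fresh links are then built further down by pairing each NV12
* plane with the first available Y plane on the same pipe.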
11696 */ 11697 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11698 if (plane->pipe != crtc->pipe || !plane_state->linked_plane) 11699 continue; 11700 11701 plane_state->linked_plane = NULL; 11702 if (plane_state->slave && !plane_state->base.visible) { 11703 crtc_state->active_planes &= ~BIT(plane->id); 11704 crtc_state->update_planes |= BIT(plane->id); 11705 } 11706 11707 plane_state->slave = false; 11708 } 11709 11710 if (!crtc_state->nv12_planes) 11711 return 0; 11712 11713 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11714 struct intel_plane_state *linked_state = NULL; 11715 11716 if (plane->pipe != crtc->pipe || 11717 !(crtc_state->nv12_planes & BIT(plane->id))) 11718 continue; 11719 11720 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 11721 if (!icl_is_nv12_y_plane(linked->id)) 11722 continue; 11723 11724 if (crtc_state->active_planes & BIT(linked->id)) 11725 continue; 11726 11727 linked_state = intel_atomic_get_plane_state(state, linked); 11728 if (IS_ERR(linked_state)) 11729 return PTR_ERR(linked_state); 11730 11731 break; 11732 } 11733 11734 if (!linked_state) { 11735 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n", 11736 hweight8(crtc_state->nv12_planes)); 11737 11738 return -EINVAL; 11739 } 11740 11741 plane_state->linked_plane = linked; 11742 11743 linked_state->slave = true; 11744 linked_state->linked_plane = plane; 11745 crtc_state->active_planes |= BIT(linked->id); 11746 crtc_state->update_planes |= BIT(linked->id); 11747 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); 11748 } 11749 11750 return 0; 11751 } 11752 11753 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 11754 { 11755 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 11756 struct intel_atomic_state *state = 11757 to_intel_atomic_state(new_crtc_state->base.state); 11758 const struct intel_crtc_state *old_crtc_state = 11759 intel_atomic_get_old_crtc_state(state, crtc); 11760 11761 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 11762 } 11763 11764 static int intel_crtc_atomic_check(struct drm_crtc *crtc, 11765 struct drm_crtc_state *crtc_state) 11766 { 11767 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 11768 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11769 struct intel_crtc_state *pipe_config = 11770 to_intel_crtc_state(crtc_state); 11771 int ret; 11772 bool mode_changed = needs_modeset(pipe_config); 11773 11774 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && 11775 mode_changed && !crtc_state->active) 11776 pipe_config->update_wm_post = true; 11777 11778 if (mode_changed && crtc_state->enable && 11779 dev_priv->display.crtc_compute_clock && 11780 !WARN_ON(pipe_config->shared_dpll)) { 11781 ret = dev_priv->display.crtc_compute_clock(intel_crtc, 11782 pipe_config); 11783 if (ret) 11784 return ret; 11785 } 11786 11787 /* 11788 * May need to update pipe gamma enable bits 11789 * when C8 planes are getting enabled/disabled. 
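* C8 is scanned out through the legacy LUT, which is presumably
* why gaining or losing the first/last C8 plane is treated as a
* color management change below.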
11790 */ 11791 if (c8_planes_changed(pipe_config)) 11792 crtc_state->color_mgmt_changed = true; 11793 11794 if (mode_changed || pipe_config->update_pipe || 11795 crtc_state->color_mgmt_changed) { 11796 ret = intel_color_check(pipe_config); 11797 if (ret) 11798 return ret; 11799 } 11800 11801 ret = 0; 11802 if (dev_priv->display.compute_pipe_wm) { 11803 ret = dev_priv->display.compute_pipe_wm(pipe_config); 11804 if (ret) { 11805 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); 11806 return ret; 11807 } 11808 } 11809 11810 if (dev_priv->display.compute_intermediate_wm) { 11811 if (WARN_ON(!dev_priv->display.compute_pipe_wm)) 11812 return 0; 11813 11814 /* 11815 * Calculate 'intermediate' watermarks that satisfy both the 11816 * old state and the new state. We can program these 11817 * immediately. 11818 */ 11819 ret = dev_priv->display.compute_intermediate_wm(pipe_config); 11820 if (ret) { 11821 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 11822 return ret; 11823 } 11824 } 11825 11826 if (INTEL_GEN(dev_priv) >= 9) { 11827 if (mode_changed || pipe_config->update_pipe) 11828 ret = skl_update_scaler_crtc(pipe_config); 11829 11830 if (!ret) 11831 ret = icl_check_nv12_planes(pipe_config); 11832 if (!ret) 11833 ret = skl_check_pipe_max_pixel_rate(intel_crtc, 11834 pipe_config); 11835 if (!ret) 11836 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc, 11837 pipe_config); 11838 } 11839 11840 if (HAS_IPS(dev_priv)) 11841 pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config); 11842 11843 return ret; 11844 } 11845 11846 static const struct drm_crtc_helper_funcs intel_helper_funcs = { 11847 .atomic_check = intel_crtc_atomic_check, 11848 }; 11849 11850 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 11851 { 11852 struct intel_connector *connector; 11853 struct drm_connector_list_iter conn_iter; 11854 11855 drm_connector_list_iter_begin(dev, &conn_iter); 11856 for_each_intel_connector_iter(connector, &conn_iter) { 11857 if (connector->base.state->crtc) 11858 drm_connector_put(&connector->base); 11859 11860 if (connector->base.encoder) { 11861 connector->base.state->best_encoder = 11862 connector->base.encoder; 11863 connector->base.state->crtc = 11864 connector->base.encoder->crtc; 11865 11866 drm_connector_get(&connector->base); 11867 } else { 11868 connector->base.state->best_encoder = NULL; 11869 connector->base.state->crtc = NULL; 11870 } 11871 } 11872 drm_connector_list_iter_end(&conn_iter); 11873 } 11874 11875 static int 11876 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 11877 struct intel_crtc_state *pipe_config) 11878 { 11879 struct drm_connector *connector = conn_state->connector; 11880 const struct drm_display_info *info = &connector->display_info; 11881 int bpp; 11882 11883 switch (conn_state->max_bpc) { 11884 case 6 ... 7: 11885 bpp = 6 * 3; 11886 break; 11887 case 8 ... 9: 11888 bpp = 8 * 3; 11889 break; 11890 case 10 ... 
11: 11891 bpp = 10 * 3; 11892 break; 11893 case 12: 11894 bpp = 12 * 3; 11895 break; 11896 default: 11897 return -EINVAL; 11898 } 11899 11900 if (bpp < pipe_config->pipe_bpp) { 11901 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " 11902 "EDID bpp %d, requested bpp %d, max platform bpp %d\n", 11903 connector->base.id, connector->name, 11904 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc, 11905 pipe_config->pipe_bpp); 11906 11907 pipe_config->pipe_bpp = bpp; 11908 } 11909 11910 return 0; 11911 } 11912 11913 static int 11914 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 11915 struct intel_crtc_state *pipe_config) 11916 { 11917 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11918 struct drm_atomic_state *state = pipe_config->base.state; 11919 struct drm_connector *connector; 11920 struct drm_connector_state *connector_state; 11921 int bpp, i; 11922 11923 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 11924 IS_CHERRYVIEW(dev_priv))) 11925 bpp = 10*3; 11926 else if (INTEL_GEN(dev_priv) >= 5) 11927 bpp = 12*3; 11928 else 11929 bpp = 8*3; 11930 11931 pipe_config->pipe_bpp = bpp; 11932 11933 /* Clamp display bpp to connector max bpp */ 11934 for_each_new_connector_in_state(state, connector, connector_state, i) { 11935 int ret; 11936 11937 if (connector_state->crtc != &crtc->base) 11938 continue; 11939 11940 ret = compute_sink_pipe_bpp(connector_state, pipe_config); 11941 if (ret) 11942 return ret; 11943 } 11944 11945 return 0; 11946 } 11947 11948 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 11949 { 11950 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 11951 "type: 0x%x flags: 0x%x\n", 11952 mode->crtc_clock, 11953 mode->crtc_hdisplay, mode->crtc_hsync_start, 11954 mode->crtc_hsync_end, mode->crtc_htotal, 11955 mode->crtc_vdisplay, mode->crtc_vsync_start, 11956 mode->crtc_vsync_end, mode->crtc_vtotal, 11957 mode->type, mode->flags); 11958 } 11959 11960 static inline void 11961 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, 11962 const char *id, unsigned int lane_count, 11963 const struct intel_link_m_n *m_n) 11964 { 11965 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 11966 id, lane_count, 11967 m_n->gmch_m, m_n->gmch_n, 11968 m_n->link_m, m_n->link_n, m_n->tu); 11969 } 11970 11971 static void 11972 intel_dump_infoframe(struct drm_i915_private *dev_priv, 11973 const union hdmi_infoframe *frame) 11974 { 11975 if ((drm_debug & DRM_UT_KMS) == 0) 11976 return; 11977 11978 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); 11979 } 11980 11981 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x 11982 11983 static const char * const output_type_str[] = { 11984 OUTPUT_TYPE(UNUSED), 11985 OUTPUT_TYPE(ANALOG), 11986 OUTPUT_TYPE(DVO), 11987 OUTPUT_TYPE(SDVO), 11988 OUTPUT_TYPE(LVDS), 11989 OUTPUT_TYPE(TVOUT), 11990 OUTPUT_TYPE(HDMI), 11991 OUTPUT_TYPE(DP), 11992 OUTPUT_TYPE(EDP), 11993 OUTPUT_TYPE(DSI), 11994 OUTPUT_TYPE(DDI), 11995 OUTPUT_TYPE(DP_MST), 11996 }; 11997 11998 #undef OUTPUT_TYPE 11999 12000 static void snprintf_output_types(char *buf, size_t len, 12001 unsigned int output_types) 12002 { 12003 char *str = buf; 12004 int i; 12005 12006 str[0] = '\0'; 12007 12008 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) { 12009 int r; 12010 12011 if ((output_types & BIT(i)) == 0) 12012 continue; 12013 12014 r = snprintf(str, len, "%s%s", 12015 str != buf ? 
"," : "", output_type_str[i]); 12016 if (r >= len) 12017 break; 12018 str += r; 12019 len -= r; 12020 12021 output_types &= ~BIT(i); 12022 } 12023 12024 WARN_ON_ONCE(output_types != 0); 12025 } 12026 12027 static const char * const output_format_str[] = { 12028 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", 12029 [INTEL_OUTPUT_FORMAT_RGB] = "RGB", 12030 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", 12031 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", 12032 }; 12033 12034 static const char *output_formats(enum intel_output_format format) 12035 { 12036 if (format >= ARRAY_SIZE(output_format_str)) 12037 format = INTEL_OUTPUT_FORMAT_INVALID; 12038 return output_format_str[format]; 12039 } 12040 12041 static void intel_dump_plane_state(const struct intel_plane_state *plane_state) 12042 { 12043 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 12044 const struct drm_framebuffer *fb = plane_state->base.fb; 12045 struct drm_format_name_buf format_name; 12046 12047 if (!fb) { 12048 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n", 12049 plane->base.base.id, plane->base.name, 12050 yesno(plane_state->base.visible)); 12051 return; 12052 } 12053 12054 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n", 12055 plane->base.base.id, plane->base.name, 12056 fb->base.id, fb->width, fb->height, 12057 drm_get_format_name(fb->format->format, &format_name), 12058 yesno(plane_state->base.visible)); 12059 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n", 12060 plane_state->base.rotation, plane_state->scaler_id); 12061 if (plane_state->base.visible) 12062 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", 12063 DRM_RECT_FP_ARG(&plane_state->base.src), 12064 DRM_RECT_ARG(&plane_state->base.dst)); 12065 } 12066 12067 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, 12068 struct intel_atomic_state *state, 12069 const char *context) 12070 { 12071 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); 12072 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12073 const struct intel_plane_state *plane_state; 12074 struct intel_plane *plane; 12075 char buf[64]; 12076 int i; 12077 12078 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n", 12079 crtc->base.base.id, crtc->base.name, 12080 yesno(pipe_config->base.enable), context); 12081 12082 if (!pipe_config->base.enable) 12083 goto dump_planes; 12084 12085 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); 12086 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n", 12087 yesno(pipe_config->base.active), 12088 buf, pipe_config->output_types, 12089 output_formats(pipe_config->output_format)); 12090 12091 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 12092 transcoder_name(pipe_config->cpu_transcoder), 12093 pipe_config->pipe_bpp, pipe_config->dither); 12094 12095 if (pipe_config->has_pch_encoder) 12096 intel_dump_m_n_config(pipe_config, "fdi", 12097 pipe_config->fdi_lanes, 12098 &pipe_config->fdi_m_n); 12099 12100 if (intel_crtc_has_dp_encoder(pipe_config)) { 12101 intel_dump_m_n_config(pipe_config, "dp m_n", 12102 pipe_config->lane_count, &pipe_config->dp_m_n); 12103 if (pipe_config->has_drrs) 12104 intel_dump_m_n_config(pipe_config, "dp m2_n2", 12105 pipe_config->lane_count, 12106 &pipe_config->dp_m2_n2); 12107 } 12108 12109 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", 12110 pipe_config->has_audio, pipe_config->has_infoframe, 12111 pipe_config->infoframes.enable); 12112 12113 if 
(pipe_config->infoframes.enable & 12114 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) 12115 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp); 12116 if (pipe_config->infoframes.enable & 12117 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) 12118 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); 12119 if (pipe_config->infoframes.enable & 12120 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) 12121 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd); 12122 if (pipe_config->infoframes.enable & 12123 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) 12124 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); 12125 12126 DRM_DEBUG_KMS("requested mode:\n"); 12127 drm_mode_debug_printmodeline(&pipe_config->base.mode); 12128 DRM_DEBUG_KMS("adjusted mode:\n"); 12129 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode); 12130 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode); 12131 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n", 12132 pipe_config->port_clock, 12133 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 12134 pipe_config->pixel_rate); 12135 12136 if (INTEL_GEN(dev_priv) >= 9) 12137 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 12138 crtc->num_scalers, 12139 pipe_config->scaler_state.scaler_users, 12140 pipe_config->scaler_state.scaler_id); 12141 12142 if (HAS_GMCH(dev_priv)) 12143 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 12144 pipe_config->gmch_pfit.control, 12145 pipe_config->gmch_pfit.pgm_ratios, 12146 pipe_config->gmch_pfit.lvds_border_bits); 12147 else 12148 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n", 12149 pipe_config->pch_pfit.pos, 12150 pipe_config->pch_pfit.size, 12151 enableddisabled(pipe_config->pch_pfit.enabled), 12152 yesno(pipe_config->pch_pfit.force_thru)); 12153 12154 DRM_DEBUG_KMS("ips: %i, double wide: %i\n", 12155 pipe_config->ips_enabled, pipe_config->double_wide); 12156 12157 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); 12158 12159 dump_planes: 12160 if (!state) 12161 return; 12162 12163 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12164 if (plane->pipe == crtc->pipe) 12165 intel_dump_plane_state(plane_state); 12166 } 12167 } 12168 12169 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 12170 { 12171 struct drm_device *dev = state->base.dev; 12172 struct drm_connector *connector; 12173 struct drm_connector_list_iter conn_iter; 12174 unsigned int used_ports = 0; 12175 unsigned int used_mst_ports = 0; 12176 bool ret = true; 12177 12178 /* 12179 * Walk the connector list instead of the encoder 12180 * list to detect the problem on ddi platforms 12181 * where there's just one encoder per digital port. 
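* With one encoder per digital port, two connectors that end up
* on the same port would never collide in the encoder list, so
* only this connector walk can spot the duplicate assignment.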
12182 */
12183 drm_connector_list_iter_begin(dev, &conn_iter);
12184 drm_for_each_connector_iter(connector, &conn_iter) {
12185 struct drm_connector_state *connector_state;
12186 struct intel_encoder *encoder;
12187
12188 connector_state =
12189 drm_atomic_get_new_connector_state(&state->base,
12190 connector);
12191 if (!connector_state)
12192 connector_state = connector->state;
12193
12194 if (!connector_state->best_encoder)
12195 continue;
12196
12197 encoder = to_intel_encoder(connector_state->best_encoder);
12198
12199 WARN_ON(!connector_state->crtc);
12200
12201 switch (encoder->type) {
12202 unsigned int port_mask;
12203 case INTEL_OUTPUT_DDI:
12204 if (WARN_ON(!HAS_DDI(to_i915(dev))))
12205 break;
12206 /* else, fall through */
12207 case INTEL_OUTPUT_DP:
12208 case INTEL_OUTPUT_HDMI:
12209 case INTEL_OUTPUT_EDP:
12210 port_mask = 1 << encoder->port;
12211
12212 /* the same port mustn't appear more than once */
12213 if (used_ports & port_mask)
12214 ret = false;
12215
12216 used_ports |= port_mask;
12217 break;
12218 case INTEL_OUTPUT_DP_MST:
12219 used_mst_ports |=
12220 1 << encoder->port;
12221 break;
12222 default:
12223 break;
12224 }
12225 }
12226 drm_connector_list_iter_end(&conn_iter);
12227
12228 /* can't mix MST and SST/HDMI on the same port */
12229 if (used_ports & used_mst_ports)
12230 return false;
12231
12232 return ret;
12233 }
12234
12235 static int
12236 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12237 {
12238 struct drm_i915_private *dev_priv =
12239 to_i915(crtc_state->base.crtc->dev);
12240 struct intel_crtc_state *saved_state;
12241
12242 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
12243 if (!saved_state)
12244 return -ENOMEM;
12245
12246 /* FIXME: before the switch to atomic started, a new pipe_config was
12247 * kzalloc'd. Code that depends on any field being zero should be
12248 * fixed, so that the crtc_state can be safely duplicated. For now,
12249 * only fields that are known not to cause problems are preserved. */
12250
12251 saved_state->scaler_state = crtc_state->scaler_state;
12252 saved_state->shared_dpll = crtc_state->shared_dpll;
12253 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
12254 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
12255 sizeof(saved_state->icl_port_dplls));
12256 saved_state->crc_enabled = crtc_state->crc_enabled;
12257 if (IS_G4X(dev_priv) ||
12258 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12259 saved_state->wm = crtc_state->wm;
12260
12261 /* Keep base drm_crtc_state intact, only clear our extended struct */
12262 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
12263 memcpy(&crtc_state->base + 1, &saved_state->base + 1,
12264 sizeof(*crtc_state) - sizeof(crtc_state->base));
12265
12266 kfree(saved_state);
12267 return 0;
12268 }
12269
12270 static int
12271 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
12272 {
12273 struct drm_crtc *crtc = pipe_config->base.crtc;
12274 struct drm_atomic_state *state = pipe_config->base.state;
12275 struct intel_encoder *encoder;
12276 struct drm_connector *connector;
12277 struct drm_connector_state *connector_state;
12278 int base_bpp, ret;
12279 int i;
12280 bool retry = true;
12281
12282 ret = clear_intel_crtc_state(pipe_config);
12283 if (ret)
12284 return ret;
12285
12286 pipe_config->cpu_transcoder =
12287 (enum transcoder) to_intel_crtc(crtc)->pipe;
12288
12289 /*
12290 * Sanitize sync polarity flags based on requested ones.
If neither
12291 * positive nor negative polarity is requested, treat this as meaning
12292 * negative polarity.
12293 */
12294 if (!(pipe_config->base.adjusted_mode.flags &
12295 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12296 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12297
12298 if (!(pipe_config->base.adjusted_mode.flags &
12299 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12300 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12301
12302 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12303 pipe_config);
12304 if (ret)
12305 return ret;
12306
12307 base_bpp = pipe_config->pipe_bpp;
12308
12309 /*
12310 * Determine the real pipe dimensions. Note that stereo modes can
12311 * increase the actual pipe size due to the frame doubling and
12312 * insertion of additional space for blanks between the frames. This
12313 * is stored in the crtc timings. We use the requested mode to do this
12314 * computation to clearly distinguish it from the adjusted mode, which
12315 * can be changed by the connectors in the below retry loop.
12316 */
12317 drm_mode_get_hv_timing(&pipe_config->base.mode,
12318 &pipe_config->pipe_src_w,
12319 &pipe_config->pipe_src_h);
12320
12321 for_each_new_connector_in_state(state, connector, connector_state, i) {
12322 if (connector_state->crtc != crtc)
12323 continue;
12324
12325 encoder = to_intel_encoder(connector_state->best_encoder);
12326
12327 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12328 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12329 return -EINVAL;
12330 }
12331
12332 /*
12333 * Determine output_types before calling the .compute_config()
12334 * hooks so that the hooks can use this information safely.
12335 */
12336 if (encoder->compute_output_type)
12337 pipe_config->output_types |=
12338 BIT(encoder->compute_output_type(encoder, pipe_config,
12339 connector_state));
12340 else
12341 pipe_config->output_types |= BIT(encoder->type);
12342 }
12343
12344 encoder_retry:
12345 /* Ensure the port clock defaults are reset when retrying. */
12346 pipe_config->port_clock = 0;
12347 pipe_config->pixel_multiplier = 1;
12348
12349 /* Fill in default crtc timings, allow encoders to overwrite them. */
12350 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12351 CRTC_STEREO_DOUBLE);
12352
12353 /* Pass our mode to the connectors and the CRTC to give them a chance to
12354 * adjust it according to limitations or connector properties, and also
12355 * a chance to reject the mode entirely.
12356 */
12357 for_each_new_connector_in_state(state, connector, connector_state, i) {
12358 if (connector_state->crtc != crtc)
12359 continue;
12360
12361 encoder = to_intel_encoder(connector_state->best_encoder);
12362 ret = encoder->compute_config(encoder, pipe_config,
12363 connector_state);
12364 if (ret < 0) {
12365 if (ret != -EDEADLK)
12366 DRM_DEBUG_KMS("Encoder config failure: %d\n",
12367 ret);
12368 return ret;
12369 }
12370 }
12371
12372 /* Set default port clock if not overwritten by the encoder. Needs to be
12373 * done afterwards in case the encoder adjusts the mode.
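* E.g. with pixel_multiplier == 2 and a 40000 kHz adjusted mode
* clock, the default port clock computed below would be 80000 kHz.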
*/
12374 if (!pipe_config->port_clock)
12375 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12376 * pipe_config->pixel_multiplier;
12377
12378 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12379 if (ret == -EDEADLK)
12380 return ret;
12381 if (ret < 0) {
12382 DRM_DEBUG_KMS("CRTC fixup failed\n");
12383 return ret;
12384 }
12385
12386 if (ret == RETRY) {
12387 if (WARN(!retry, "loop in pipe configuration computation\n"))
12388 return -EINVAL;
12389
12390 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12391 retry = false;
12392 goto encoder_retry;
12393 }
12394
12395 /* Dithering seems to not pass bits through correctly when it should, so
12396 * only enable it on 6bpc panels and when it's not a compliance
12397 * test requesting a 6bpc video pattern.
12398 */
12399 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
12400 !pipe_config->dither_force_disable;
12401 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12402 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12403
12404 return 0;
12405 }
12406
12407 bool intel_fuzzy_clock_check(int clock1, int clock2)
12408 {
12409 int diff;
12410
12411 if (clock1 == clock2)
12412 return true;
12413
12414 if (!clock1 || !clock2)
12415 return false;
12416
12417 diff = abs(clock1 - clock2);
12418
12419 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
12420 return true;
12421
12422 return false;
12423 }
12424
12425 static bool
12426 intel_compare_m_n(unsigned int m, unsigned int n,
12427 unsigned int m2, unsigned int n2,
12428 bool exact)
12429 {
12430 if (m == m2 && n == n2)
12431 return true;
12432
12433 if (exact || !m || !n || !m2 || !n2)
12434 return false;
12435
12436 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12437
12438 if (n > n2) {
12439 while (n > n2) {
12440 m2 <<= 1;
12441 n2 <<= 1;
12442 }
12443 } else if (n < n2) {
12444 while (n < n2) {
12445 m <<= 1;
12446 n <<= 1;
12447 }
12448 }
12449
12450 if (n != n2)
12451 return false;
12452
12453 return intel_fuzzy_clock_check(m, m2);
12454 }
12455
12456 static bool
12457 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12458 const struct intel_link_m_n *m2_n2,
12459 bool exact)
12460 {
12461 return m_n->tu == m2_n2->tu &&
12462 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12463 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12464 intel_compare_m_n(m_n->link_m, m_n->link_n,
12465 m2_n2->link_m, m2_n2->link_n, exact);
12466 }
12467
12468 static bool
12469 intel_compare_infoframe(const union hdmi_infoframe *a,
12470 const union hdmi_infoframe *b)
12471 {
12472 return memcmp(a, b, sizeof(*a)) == 0;
12473 }
12474
12475 static void
12476 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
12477 bool fastset, const char *name,
12478 const union hdmi_infoframe *a,
12479 const union hdmi_infoframe *b)
12480 {
12481 if (fastset) {
12482 if ((drm_debug & DRM_UT_KMS) == 0)
12483 return;
12484
12485 drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name);
12486 drm_dbg(DRM_UT_KMS, "expected:");
12487 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12488 drm_dbg(DRM_UT_KMS, "found");
12489 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12490 } else {
12491 drm_err("mismatch in %s infoframe", name);
12492 drm_err("expected:");
12493 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12494 drm_err("found");
12495 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12496 }
12497 }
12498
12499 static void __printf(3, 4)
12500 pipe_config_mismatch(bool fastset, const char *name, const char
*format, ...) 12501 { 12502 struct va_format vaf; 12503 va_list args; 12504 12505 va_start(args, format); 12506 vaf.fmt = format; 12507 vaf.va = &args; 12508 12509 if (fastset) 12510 drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf); 12511 else 12512 drm_err("mismatch in %s %pV", name, &vaf); 12513 12514 va_end(args); 12515 } 12516 12517 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 12518 { 12519 if (i915_modparams.fastboot != -1) 12520 return i915_modparams.fastboot; 12521 12522 /* Enable fastboot by default on Skylake and newer */ 12523 if (INTEL_GEN(dev_priv) >= 9) 12524 return true; 12525 12526 /* Enable fastboot by default on VLV and CHV */ 12527 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 12528 return true; 12529 12530 /* Disabled by default on all others */ 12531 return false; 12532 } 12533 12534 static bool 12535 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 12536 const struct intel_crtc_state *pipe_config, 12537 bool fastset) 12538 { 12539 struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev); 12540 bool ret = true; 12541 bool fixup_inherited = fastset && 12542 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) && 12543 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED); 12544 12545 if (fixup_inherited && !fastboot_enabled(dev_priv)) { 12546 DRM_DEBUG_KMS("initial modeset and fastboot not set\n"); 12547 ret = false; 12548 } 12549 12550 #define PIPE_CONF_CHECK_X(name) do { \ 12551 if (current_config->name != pipe_config->name) { \ 12552 pipe_config_mismatch(fastset, __stringify(name), \ 12553 "(expected 0x%08x, found 0x%08x)\n", \ 12554 current_config->name, \ 12555 pipe_config->name); \ 12556 ret = false; \ 12557 } \ 12558 } while (0) 12559 12560 #define PIPE_CONF_CHECK_I(name) do { \ 12561 if (current_config->name != pipe_config->name) { \ 12562 pipe_config_mismatch(fastset, __stringify(name), \ 12563 "(expected %i, found %i)\n", \ 12564 current_config->name, \ 12565 pipe_config->name); \ 12566 ret = false; \ 12567 } \ 12568 } while (0) 12569 12570 #define PIPE_CONF_CHECK_BOOL(name) do { \ 12571 if (current_config->name != pipe_config->name) { \ 12572 pipe_config_mismatch(fastset, __stringify(name), \ 12573 "(expected %s, found %s)\n", \ 12574 yesno(current_config->name), \ 12575 yesno(pipe_config->name)); \ 12576 ret = false; \ 12577 } \ 12578 } while (0) 12579 12580 /* 12581 * Checks state where we only read out the enabling, but not the entire 12582 * state itself (like full infoframes or ELD for audio). These states 12583 * require a full modeset on bootup to fix up. 
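* The macro below therefore only trusts a match on an inherited
* state when both sides agree the feature is off; anything else
* fails the check and forces a full modeset.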
12584 */
12585 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
12586 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12587 PIPE_CONF_CHECK_BOOL(name); \
12588 } else { \
12589 pipe_config_mismatch(fastset, __stringify(name), \
12590 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
12591 yesno(current_config->name), \
12592 yesno(pipe_config->name)); \
12593 ret = false; \
12594 } \
12595 } while (0)
12596
12597 #define PIPE_CONF_CHECK_P(name) do { \
12598 if (current_config->name != pipe_config->name) { \
12599 pipe_config_mismatch(fastset, __stringify(name), \
12600 "(expected %p, found %p)\n", \
12601 current_config->name, \
12602 pipe_config->name); \
12603 ret = false; \
12604 } \
12605 } while (0)
12606
12607 #define PIPE_CONF_CHECK_M_N(name) do { \
12608 if (!intel_compare_link_m_n(&current_config->name, \
12609 &pipe_config->name,\
12610 !fastset)) { \
12611 pipe_config_mismatch(fastset, __stringify(name), \
12612 "(expected tu %i gmch %i/%i link %i/%i, " \
12613 "found tu %i, gmch %i/%i link %i/%i)\n", \
12614 current_config->name.tu, \
12615 current_config->name.gmch_m, \
12616 current_config->name.gmch_n, \
12617 current_config->name.link_m, \
12618 current_config->name.link_n, \
12619 pipe_config->name.tu, \
12620 pipe_config->name.gmch_m, \
12621 pipe_config->name.gmch_n, \
12622 pipe_config->name.link_m, \
12623 pipe_config->name.link_n); \
12624 ret = false; \
12625 } \
12626 } while (0)
12627
12628 /* This is required for BDW+ where there is only one set of registers for
12629 * switching between high and low RR.
12630 * This macro can be used whenever a comparison has to be made between one
12631 * hw state and multiple sw state variables.
12632 */
12633 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
12634 if (!intel_compare_link_m_n(&current_config->name, \
12635 &pipe_config->name, !fastset) && \
12636 !intel_compare_link_m_n(&current_config->alt_name, \
12637 &pipe_config->name, !fastset)) { \
12638 pipe_config_mismatch(fastset, __stringify(name), \
12639 "(expected tu %i gmch %i/%i link %i/%i, " \
12640 "or tu %i gmch %i/%i link %i/%i, " \
12641 "found tu %i, gmch %i/%i link %i/%i)\n", \
12642 current_config->name.tu, \
12643 current_config->name.gmch_m, \
12644 current_config->name.gmch_n, \
12645 current_config->name.link_m, \
12646 current_config->name.link_n, \
12647 current_config->alt_name.tu, \
12648 current_config->alt_name.gmch_m, \
12649 current_config->alt_name.gmch_n, \
12650 current_config->alt_name.link_m, \
12651 current_config->alt_name.link_n, \
12652 pipe_config->name.tu, \
12653 pipe_config->name.gmch_m, \
12654 pipe_config->name.gmch_n, \
12655 pipe_config->name.link_m, \
12656 pipe_config->name.link_n); \
12657 ret = false; \
12658 } \
12659 } while (0)
12660
12661 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
12662 if ((current_config->name ^ pipe_config->name) & (mask)) { \
12663 pipe_config_mismatch(fastset, __stringify(name), \
12664 "(%x) (expected %i, found %i)\n", \
12665 (mask), \
12666 current_config->name & (mask), \
12667 pipe_config->name & (mask)); \
12668 ret = false; \
12669 } \
12670 } while (0)
12671
12672 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12673 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12674 pipe_config_mismatch(fastset, __stringify(name), \
12675 "(expected %i, found %i)\n", \
12676 current_config->name, \
12677 pipe_config->name); \
12678 ret = false; \
12679 } \
12680 } while (0)
12681
12682 #define
PIPE_CONF_CHECK_INFOFRAME(name) do { \
12683 if (!intel_compare_infoframe(&current_config->infoframes.name, \
12684 &pipe_config->infoframes.name)) { \
12685 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
12686 &current_config->infoframes.name, \
12687 &pipe_config->infoframes.name); \
12688 ret = false; \
12689 } \
12690 } while (0)
12691
12692 #define PIPE_CONF_QUIRK(quirk) \
12693 ((current_config->quirks | pipe_config->quirks) & (quirk))
12694
12695 PIPE_CONF_CHECK_I(cpu_transcoder);
12696
12697 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12698 PIPE_CONF_CHECK_I(fdi_lanes);
12699 PIPE_CONF_CHECK_M_N(fdi_m_n);
12700
12701 PIPE_CONF_CHECK_I(lane_count);
12702 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12703
12704 if (INTEL_GEN(dev_priv) < 8) {
12705 PIPE_CONF_CHECK_M_N(dp_m_n);
12706
12707 if (current_config->has_drrs)
12708 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12709 } else
12710 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12711
12712 PIPE_CONF_CHECK_X(output_types);
12713
12714 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12715 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12716 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12717 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12718 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12719 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12720
12721 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12722 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12723 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12724 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12725 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12726 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12727
12728 PIPE_CONF_CHECK_I(pixel_multiplier);
12729 PIPE_CONF_CHECK_I(output_format);
12730 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12731 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12732 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12733 PIPE_CONF_CHECK_BOOL(limited_color_range);
12734
12735 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12736 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12737 PIPE_CONF_CHECK_BOOL(has_infoframe);
12738
12739 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12740
12741 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12742 DRM_MODE_FLAG_INTERLACE);
12743
12744 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12745 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12746 DRM_MODE_FLAG_PHSYNC);
12747 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12748 DRM_MODE_FLAG_NHSYNC);
12749 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12750 DRM_MODE_FLAG_PVSYNC);
12751 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12752 DRM_MODE_FLAG_NVSYNC);
12753 }
12754
12755 PIPE_CONF_CHECK_X(gmch_pfit.control);
12756 /* pfit ratios are autocomputed by the hw on gen4+ */
12757 if (INTEL_GEN(dev_priv) < 4)
12758 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12759 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12760
12761 /*
12762 * Changing the EDP transcoder input mux
12763 * (A_ONOFF vs. A_ON) requires a full modeset.
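* That is also why force_thru is checked below even for fastsets,
* unlike the rest of the pch_pfit state, which is only compared
* in the !fastset path further down.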
12764 */ 12765 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 12766 12767 if (!fastset) { 12768 PIPE_CONF_CHECK_I(pipe_src_w); 12769 PIPE_CONF_CHECK_I(pipe_src_h); 12770 12771 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 12772 if (current_config->pch_pfit.enabled) { 12773 PIPE_CONF_CHECK_X(pch_pfit.pos); 12774 PIPE_CONF_CHECK_X(pch_pfit.size); 12775 } 12776 12777 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 12778 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 12779 12780 PIPE_CONF_CHECK_X(gamma_mode); 12781 if (IS_CHERRYVIEW(dev_priv)) 12782 PIPE_CONF_CHECK_X(cgm_mode); 12783 else 12784 PIPE_CONF_CHECK_X(csc_mode); 12785 PIPE_CONF_CHECK_BOOL(gamma_enable); 12786 PIPE_CONF_CHECK_BOOL(csc_enable); 12787 } 12788 12789 PIPE_CONF_CHECK_BOOL(double_wide); 12790 12791 PIPE_CONF_CHECK_P(shared_dpll); 12792 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 12793 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 12794 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 12795 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 12796 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 12797 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 12798 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 12799 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 12800 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 12801 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 12802 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 12803 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 12804 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 12805 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 12806 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 12807 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 12808 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 12809 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 12810 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 12811 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 12812 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 12813 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 12814 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 12815 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 12816 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 12817 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 12818 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 12819 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 12820 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 12821 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 12822 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 12823 12824 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 12825 PIPE_CONF_CHECK_X(dsi_pll.div); 12826 12827 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 12828 PIPE_CONF_CHECK_I(pipe_bpp); 12829 12830 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); 12831 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 12832 12833 PIPE_CONF_CHECK_I(min_voltage_level); 12834 12835 PIPE_CONF_CHECK_X(infoframes.enable); 12836 PIPE_CONF_CHECK_X(infoframes.gcp); 12837 PIPE_CONF_CHECK_INFOFRAME(avi); 12838 PIPE_CONF_CHECK_INFOFRAME(spd); 12839 PIPE_CONF_CHECK_INFOFRAME(hdmi); 12840 PIPE_CONF_CHECK_INFOFRAME(drm); 12841 12842 #undef PIPE_CONF_CHECK_X 12843 #undef PIPE_CONF_CHECK_I 12844 #undef PIPE_CONF_CHECK_BOOL 12845 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 12846 #undef PIPE_CONF_CHECK_P 12847 #undef PIPE_CONF_CHECK_FLAGS 12848 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 12849 #undef PIPE_CONF_QUIRK 12850 12851 return ret; 12852 } 12853 12854 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 12855 const struct intel_crtc_state *pipe_config) 12856 { 12857 if (pipe_config->has_pch_encoder) { 12858 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 12859 &pipe_config->fdi_m_n); 12860 int dotclock = 
pipe_config->base.adjusted_mode.crtc_clock; 12861 12862 /* 12863 * FDI already provided one idea for the dotclock. 12864 * Yell if the encoder disagrees. 12865 */ 12866 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock), 12867 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 12868 fdi_dotclock, dotclock); 12869 } 12870 } 12871 12872 static void verify_wm_state(struct intel_crtc *crtc, 12873 struct intel_crtc_state *new_crtc_state) 12874 { 12875 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12876 struct skl_hw_state { 12877 struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; 12878 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; 12879 struct skl_ddb_allocation ddb; 12880 struct skl_pipe_wm wm; 12881 } *hw; 12882 struct skl_ddb_allocation *sw_ddb; 12883 struct skl_pipe_wm *sw_wm; 12884 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 12885 const enum pipe pipe = crtc->pipe; 12886 int plane, level, max_level = ilk_wm_max_level(dev_priv); 12887 12888 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active) 12889 return; 12890 12891 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 12892 if (!hw) 12893 return; 12894 12895 skl_pipe_wm_get_hw_state(crtc, &hw->wm); 12896 sw_wm = &new_crtc_state->wm.skl.optimal; 12897 12898 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); 12899 12900 skl_ddb_get_hw_state(dev_priv, &hw->ddb); 12901 sw_ddb = &dev_priv->wm.skl_hw.ddb; 12902 12903 if (INTEL_GEN(dev_priv) >= 11 && 12904 hw->ddb.enabled_slices != sw_ddb->enabled_slices) 12905 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n", 12906 sw_ddb->enabled_slices, 12907 hw->ddb.enabled_slices); 12908 12909 /* planes */ 12910 for_each_universal_plane(dev_priv, pipe, plane) { 12911 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 12912 12913 hw_plane_wm = &hw->wm.planes[plane]; 12914 sw_plane_wm = &sw_wm->planes[plane]; 12915 12916 /* Watermarks */ 12917 for (level = 0; level <= max_level; level++) { 12918 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 12919 &sw_plane_wm->wm[level])) 12920 continue; 12921 12922 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 12923 pipe_name(pipe), plane + 1, level, 12924 sw_plane_wm->wm[level].plane_en, 12925 sw_plane_wm->wm[level].plane_res_b, 12926 sw_plane_wm->wm[level].plane_res_l, 12927 hw_plane_wm->wm[level].plane_en, 12928 hw_plane_wm->wm[level].plane_res_b, 12929 hw_plane_wm->wm[level].plane_res_l); 12930 } 12931 12932 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 12933 &sw_plane_wm->trans_wm)) { 12934 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 12935 pipe_name(pipe), plane + 1, 12936 sw_plane_wm->trans_wm.plane_en, 12937 sw_plane_wm->trans_wm.plane_res_b, 12938 sw_plane_wm->trans_wm.plane_res_l, 12939 hw_plane_wm->trans_wm.plane_en, 12940 hw_plane_wm->trans_wm.plane_res_b, 12941 hw_plane_wm->trans_wm.plane_res_l); 12942 } 12943 12944 /* DDB */ 12945 hw_ddb_entry = &hw->ddb_y[plane]; 12946 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane]; 12947 12948 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 12949 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", 12950 pipe_name(pipe), plane + 1, 12951 sw_ddb_entry->start, sw_ddb_entry->end, 12952 hw_ddb_entry->start, hw_ddb_entry->end); 12953 } 12954 } 12955 12956 /* 12957 * cursor 12958 * If the cursor plane isn't active, we may not have updated its ddb 12959 * allocation.
In that case since the ddb allocation will be updated 12960 * once the plane becomes visible, we can skip this check 12961 */ 12962 if (1) { 12963 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 12964 12965 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR]; 12966 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; 12967 12968 /* Watermarks */ 12969 for (level = 0; level <= max_level; level++) { 12970 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 12971 &sw_plane_wm->wm[level])) 12972 continue; 12973 12974 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 12975 pipe_name(pipe), level, 12976 sw_plane_wm->wm[level].plane_en, 12977 sw_plane_wm->wm[level].plane_res_b, 12978 sw_plane_wm->wm[level].plane_res_l, 12979 hw_plane_wm->wm[level].plane_en, 12980 hw_plane_wm->wm[level].plane_res_b, 12981 hw_plane_wm->wm[level].plane_res_l); 12982 } 12983 12984 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 12985 &sw_plane_wm->trans_wm)) { 12986 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 12987 pipe_name(pipe), 12988 sw_plane_wm->trans_wm.plane_en, 12989 sw_plane_wm->trans_wm.plane_res_b, 12990 sw_plane_wm->trans_wm.plane_res_l, 12991 hw_plane_wm->trans_wm.plane_en, 12992 hw_plane_wm->trans_wm.plane_res_b, 12993 hw_plane_wm->trans_wm.plane_res_l); 12994 } 12995 12996 /* DDB */ 12997 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; 12998 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; 12999 13000 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 13001 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", 13002 pipe_name(pipe), 13003 sw_ddb_entry->start, sw_ddb_entry->end, 13004 hw_ddb_entry->start, hw_ddb_entry->end); 13005 } 13006 } 13007 13008 kfree(hw); 13009 } 13010 13011 static void 13012 verify_connector_state(struct intel_atomic_state *state, 13013 struct intel_crtc *crtc) 13014 { 13015 struct drm_connector *connector; 13016 struct drm_connector_state *new_conn_state; 13017 int i; 13018 13019 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { 13020 struct drm_encoder *encoder = connector->encoder; 13021 struct intel_crtc_state *crtc_state = NULL; 13022 13023 if (new_conn_state->crtc != &crtc->base) 13024 continue; 13025 13026 if (crtc) 13027 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 13028 13029 intel_connector_verify_state(crtc_state, new_conn_state); 13030 13031 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 13032 "connector's atomic encoder doesn't match legacy encoder\n"); 13033 } 13034 } 13035 13036 static void 13037 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) 13038 { 13039 struct intel_encoder *encoder; 13040 struct drm_connector *connector; 13041 struct drm_connector_state *old_conn_state, *new_conn_state; 13042 int i; 13043 13044 for_each_intel_encoder(&dev_priv->drm, encoder) { 13045 bool enabled = false, found = false; 13046 enum pipe pipe; 13047 13048 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 13049 encoder->base.base.id, 13050 encoder->base.name); 13051 13052 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, 13053 new_conn_state, i) { 13054 if (old_conn_state->best_encoder == &encoder->base) 13055 found = true; 13056 13057 if (new_conn_state->best_encoder != &encoder->base) 13058 continue; 13059 found = enabled = true; 13060 13061 I915_STATE_WARN(new_conn_state->crtc != 13062 encoder->base.crtc, 13063 "connector's crtc doesn't match 
encoder crtc\n"); 13064 } 13065 13066 if (!found) 13067 continue; 13068 13069 I915_STATE_WARN(!!encoder->base.crtc != enabled, 13070 "encoder's enabled state mismatch " 13071 "(expected %i, found %i)\n", 13072 !!encoder->base.crtc, enabled); 13073 13074 if (!encoder->base.crtc) { 13075 bool active; 13076 13077 active = encoder->get_hw_state(encoder, &pipe); 13078 I915_STATE_WARN(active, 13079 "encoder detached but still enabled on pipe %c.\n", 13080 pipe_name(pipe)); 13081 } 13082 } 13083 } 13084 13085 static void 13086 verify_crtc_state(struct intel_crtc *crtc, 13087 struct intel_crtc_state *old_crtc_state, 13088 struct intel_crtc_state *new_crtc_state) 13089 { 13090 struct drm_device *dev = crtc->base.dev; 13091 struct drm_i915_private *dev_priv = to_i915(dev); 13092 struct intel_encoder *encoder; 13093 struct intel_crtc_state *pipe_config; 13094 struct drm_atomic_state *state; 13095 bool active; 13096 13097 state = old_crtc_state->base.state; 13098 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base); 13099 pipe_config = old_crtc_state; 13100 memset(pipe_config, 0, sizeof(*pipe_config)); 13101 pipe_config->base.crtc = &crtc->base; 13102 pipe_config->base.state = state; 13103 13104 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); 13105 13106 active = dev_priv->display.get_pipe_config(crtc, pipe_config); 13107 13108 /* we keep both pipes enabled on 830 */ 13109 if (IS_I830(dev_priv)) 13110 active = new_crtc_state->base.active; 13111 13112 I915_STATE_WARN(new_crtc_state->base.active != active, 13113 "crtc active state doesn't match with hw state " 13114 "(expected %i, found %i)\n", new_crtc_state->base.active, active); 13115 13116 I915_STATE_WARN(crtc->active != new_crtc_state->base.active, 13117 "transitional active state does not match atomic hw state " 13118 "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active); 13119 13120 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 13121 enum pipe pipe; 13122 13123 active = encoder->get_hw_state(encoder, &pipe); 13124 I915_STATE_WARN(active != new_crtc_state->base.active, 13125 "[ENCODER:%i] active %i with crtc active %i\n", 13126 encoder->base.base.id, active, new_crtc_state->base.active); 13127 13128 I915_STATE_WARN(active && crtc->pipe != pipe, 13129 "Encoder connected to wrong pipe %c\n", 13130 pipe_name(pipe)); 13131 13132 if (active) 13133 encoder->get_config(encoder, pipe_config); 13134 } 13135 13136 intel_crtc_compute_pixel_rate(pipe_config); 13137 13138 if (!new_crtc_state->base.active) 13139 return; 13140 13141 intel_pipe_config_sanity_check(dev_priv, pipe_config); 13142 13143 if (!intel_pipe_config_compare(new_crtc_state, 13144 pipe_config, false)) { 13145 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 13146 intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); 13147 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]"); 13148 } 13149 } 13150 13151 static void 13152 intel_verify_planes(struct intel_atomic_state *state) 13153 { 13154 struct intel_plane *plane; 13155 const struct intel_plane_state *plane_state; 13156 int i; 13157 13158 for_each_new_intel_plane_in_state(state, plane, 13159 plane_state, i) 13160 assert_plane(plane, plane_state->slave || 13161 plane_state->base.visible); 13162 } 13163 13164 static void 13165 verify_single_dpll_state(struct drm_i915_private *dev_priv, 13166 struct intel_shared_dpll *pll, 13167 struct intel_crtc *crtc, 13168 struct intel_crtc_state *new_crtc_state) 13169 { 13170 struct intel_dpll_hw_state dpll_hw_state; 13171 unsigned int 
crtc_mask; 13172 bool active; 13173 13174 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 13175 13176 DRM_DEBUG_KMS("%s\n", pll->info->name); 13177 13178 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state); 13179 13180 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { 13181 I915_STATE_WARN(!pll->on && pll->active_mask, 13182 "pll in active use but not on in sw tracking\n"); 13183 I915_STATE_WARN(pll->on && !pll->active_mask, 13184 "pll is on but not used by any active crtc\n"); 13185 I915_STATE_WARN(pll->on != active, 13186 "pll on state mismatch (expected %i, found %i)\n", 13187 pll->on, active); 13188 } 13189 13190 if (!crtc) { 13191 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, 13192 "more active pll users than references: %x vs %x\n", 13193 pll->active_mask, pll->state.crtc_mask); 13194 13195 return; 13196 } 13197 13198 crtc_mask = drm_crtc_mask(&crtc->base); 13199 13200 if (new_crtc_state->base.active) 13201 I915_STATE_WARN(!(pll->active_mask & crtc_mask), 13202 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", 13203 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 13204 else 13205 I915_STATE_WARN(pll->active_mask & crtc_mask, 13206 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", 13207 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 13208 13209 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), 13210 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", 13211 crtc_mask, pll->state.crtc_mask); 13212 13213 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 13214 &dpll_hw_state, 13215 sizeof(dpll_hw_state)), 13216 "pll hw state mismatch\n"); 13217 } 13218 13219 static void 13220 verify_shared_dpll_state(struct intel_crtc *crtc, 13221 struct intel_crtc_state *old_crtc_state, 13222 struct intel_crtc_state *new_crtc_state) 13223 { 13224 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13225 13226 if (new_crtc_state->shared_dpll) 13227 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state); 13228 13229 if (old_crtc_state->shared_dpll && 13230 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { 13231 unsigned int crtc_mask = drm_crtc_mask(&crtc->base); 13232 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; 13233 13234 I915_STATE_WARN(pll->active_mask & crtc_mask, 13235 "pll active mismatch (didn't expect pipe %c in active mask)\n", 13236 pipe_name(drm_crtc_index(&crtc->base))); 13237 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, 13238 "pll enabled crtcs mismatch (found %x in enabled mask)\n", 13239 pipe_name(drm_crtc_index(&crtc->base))); 13240 } 13241 } 13242 13243 static void 13244 intel_modeset_verify_crtc(struct intel_crtc *crtc, 13245 struct intel_atomic_state *state, 13246 struct intel_crtc_state *old_crtc_state, 13247 struct intel_crtc_state *new_crtc_state) 13248 { 13249 if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) 13250 return; 13251 13252 verify_wm_state(crtc, new_crtc_state); 13253 verify_connector_state(state, crtc); 13254 verify_crtc_state(crtc, old_crtc_state, new_crtc_state); 13255 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state); 13256 } 13257 13258 static void 13259 verify_disabled_dpll_state(struct drm_i915_private *dev_priv) 13260 { 13261 int i; 13262 13263 for (i = 0; i < dev_priv->num_shared_dpll; i++) 13264 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); 13265 } 13266 13267 static void 13268 intel_modeset_verify_disabled(struct 
drm_i915_private *dev_priv, 13269 struct intel_atomic_state *state) 13270 { 13271 verify_encoder_state(dev_priv, state); 13272 verify_connector_state(state, NULL); 13273 verify_disabled_dpll_state(dev_priv); 13274 } 13275 13276 static void update_scanline_offset(const struct intel_crtc_state *crtc_state) 13277 { 13278 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 13279 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13280 13281 /* 13282 * The scanline counter increments at the leading edge of hsync. 13283 * 13284 * On most platforms it starts counting from vtotal-1 on the 13285 * first active line. That means the scanline counter value is 13286 * always one less than what we would expect. Ie. just after 13287 * start of vblank, which also occurs at start of hsync (on the 13288 * last active line), the scanline counter will read vblank_start-1. 13289 * 13290 * On gen2 the scanline counter starts counting from 1 instead 13291 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 13292 * to keep the value positive), instead of adding one. 13293 * 13294 * On HSW+ the behaviour of the scanline counter depends on the output 13295 * type. For DP ports it behaves like most other platforms, but on HDMI 13296 * there's an extra 1 line difference. So we need to add two instead of 13297 * one to the value. 13298 * 13299 * On VLV/CHV DSI the scanline counter would appear to increment 13300 * approx. 1/3 of a scanline before start of vblank. Unfortunately 13301 * that means we can't tell whether we're in vblank or not while 13302 * we're on that particular line. We must still set scanline_offset 13303 * to 1 so that the vblank timestamps come out correct when we query 13304 * the scanline counter from within the vblank interrupt handler. 13305 * However if queried just before the start of vblank we'll get an 13306 * answer that's slightly in the future. 13307 */ 13308 if (IS_GEN(dev_priv, 2)) { 13309 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; 13310 int vtotal; 13311 13312 vtotal = adjusted_mode->crtc_vtotal; 13313 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 13314 vtotal /= 2; 13315 13316 crtc->scanline_offset = vtotal - 1; 13317 } else if (HAS_DDI(dev_priv) && 13318 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 13319 crtc->scanline_offset = 2; 13320 } else 13321 crtc->scanline_offset = 1; 13322 } 13323 13324 static void intel_modeset_clear_plls(struct intel_atomic_state *state) 13325 { 13326 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13327 struct intel_crtc_state *new_crtc_state; 13328 struct intel_crtc *crtc; 13329 int i; 13330 13331 if (!dev_priv->display.crtc_compute_clock) 13332 return; 13333 13334 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 13335 if (!needs_modeset(new_crtc_state)) 13336 continue; 13337 13338 intel_release_shared_dplls(state, crtc); 13339 } 13340 } 13341 13342 /* 13343 * This implements the workaround described in the "notes" section of the mode 13344 * set sequence documentation. When going from no pipes or single pipe to 13345 * multiple pipes, and planes are enabled after the pipe, we need to wait at 13346 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 
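 * For example: when enabling pipes A and B from a state where only A was
 * running, with the planes enabled after the pipes, the planes on B must
 * not be enabled until A has seen at least two vblanks after its modeset.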
13347 */ 13348 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) 13349 { 13350 struct intel_crtc_state *crtc_state; 13351 struct intel_crtc *crtc; 13352 struct intel_crtc_state *first_crtc_state = NULL; 13353 struct intel_crtc_state *other_crtc_state = NULL; 13354 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 13355 int i; 13356 13357 /* look at all crtcs that are going to be enabled during the modeset */ 13358 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 13359 if (!crtc_state->base.active || 13360 !needs_modeset(crtc_state)) 13361 continue; 13362 13363 if (first_crtc_state) { 13364 other_crtc_state = crtc_state; 13365 break; 13366 } else { 13367 first_crtc_state = crtc_state; 13368 first_pipe = crtc->pipe; 13369 } 13370 } 13371 13372 /* No workaround needed? */ 13373 if (!first_crtc_state) 13374 return 0; 13375 13376 /* w/a possibly needed, check how many crtcs are already enabled. */ 13377 for_each_intel_crtc(state->base.dev, crtc) { 13378 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 13379 if (IS_ERR(crtc_state)) 13380 return PTR_ERR(crtc_state); 13381 13382 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 13383 13384 if (!crtc_state->base.active || 13385 needs_modeset(crtc_state)) 13386 continue; 13387 13388 /* 2 or more enabled crtcs means no need for w/a */ 13389 if (enabled_pipe != INVALID_PIPE) 13390 return 0; 13391 13392 enabled_pipe = crtc->pipe; 13393 } 13394 13395 if (enabled_pipe != INVALID_PIPE) 13396 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 13397 else if (other_crtc_state) 13398 other_crtc_state->hsw_workaround_pipe = first_pipe; 13399 13400 return 0; 13401 } 13402 13403 static int intel_lock_all_pipes(struct intel_atomic_state *state) 13404 { 13405 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13406 struct intel_crtc *crtc; 13407 13408 /* Add all pipes to the state */ 13409 for_each_intel_crtc(&dev_priv->drm, crtc) { 13410 struct intel_crtc_state *crtc_state; 13411 13412 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 13413 if (IS_ERR(crtc_state)) 13414 return PTR_ERR(crtc_state); 13415 } 13416 13417 return 0; 13418 } 13419 13420 static int intel_modeset_all_pipes(struct intel_atomic_state *state) 13421 { 13422 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13423 struct intel_crtc *crtc; 13424 13425 /* 13426 * Add all pipes to the state, and force 13427 * a modeset on all the active ones.
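 * Unlike intel_lock_all_pipes() above, which only adds the crtc states
 * to the atomic state (and with them takes all the crtc locks), this
 * also marks the active pipes as mode_changed and pulls their affected
 * connectors and planes in, so the whole configuration gets recomputed.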
13428 */ 13429 for_each_intel_crtc(&dev_priv->drm, crtc) { 13430 struct intel_crtc_state *crtc_state; 13431 int ret; 13432 13433 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 13434 if (IS_ERR(crtc_state)) 13435 return PTR_ERR(crtc_state); 13436 13437 if (!crtc_state->base.active || needs_modeset(crtc_state)) 13438 continue; 13439 13440 crtc_state->base.mode_changed = true; 13441 13442 ret = drm_atomic_add_affected_connectors(&state->base, 13443 &crtc->base); 13444 if (ret) 13445 return ret; 13446 13447 ret = drm_atomic_add_affected_planes(&state->base, 13448 &crtc->base); 13449 if (ret) 13450 return ret; 13451 } 13452 13453 return 0; 13454 } 13455 13456 static int intel_modeset_checks(struct intel_atomic_state *state) 13457 { 13458 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13459 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13460 struct intel_crtc *crtc; 13461 int ret = 0, i; 13462 13463 if (!check_digital_port_conflicts(state)) { 13464 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); 13465 return -EINVAL; 13466 } 13467 13468 /* keep the current setting */ 13469 if (!state->cdclk.force_min_cdclk_changed) 13470 state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk; 13471 13472 state->modeset = true; 13473 state->active_crtcs = dev_priv->active_crtcs; 13474 state->cdclk.logical = dev_priv->cdclk.logical; 13475 state->cdclk.actual = dev_priv->cdclk.actual; 13476 state->cdclk.pipe = INVALID_PIPE; 13477 13478 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13479 new_crtc_state, i) { 13480 if (new_crtc_state->base.active) 13481 state->active_crtcs |= 1 << i; 13482 else 13483 state->active_crtcs &= ~(1 << i); 13484 13485 if (old_crtc_state->base.active != new_crtc_state->base.active) 13486 state->active_pipe_changes |= drm_crtc_mask(&crtc->base); 13487 } 13488 13489 /* 13490 * See if the config requires any additional preparation, e.g. 13491 * to adjust global state with pipes off. We need to do this 13492 * here so we can get the modeset_pipe updated config for the new 13493 * mode set on this crtc. For other crtcs we need to use the 13494 * adjusted_mode bits in the crtc directly. 13495 */ 13496 if (dev_priv->display.modeset_calc_cdclk) { 13497 enum pipe pipe; 13498 13499 ret = dev_priv->display.modeset_calc_cdclk(state); 13500 if (ret < 0) 13501 return ret; 13502 13503 /* 13504 * Writes to dev_priv->cdclk.logical must be protected by 13505 * holding all the crtc locks, even if we don't end up 13506 * touching the hardware 13507 */ 13508 if (intel_cdclk_changed(&dev_priv->cdclk.logical, 13509 &state->cdclk.logical)) { 13510 ret = intel_lock_all_pipes(state); 13511 if (ret < 0) 13512 return ret; 13513 } 13514 13515 if (is_power_of_2(state->active_crtcs)) { 13516 struct intel_crtc *crtc; 13517 struct intel_crtc_state *crtc_state; 13518 13519 pipe = ilog2(state->active_crtcs); 13520 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 13521 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 13522 if (crtc_state && needs_modeset(crtc_state)) 13523 pipe = INVALID_PIPE; 13524 } else { 13525 pipe = INVALID_PIPE; 13526 } 13527 13528 /* All pipes must be switched off while we change the cdclk.
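 * The cd2x divider update handled first below is the lighter case: with
 * exactly one active pipe that is not itself being modeset, the divider
 * can be retuned against that pipe alone (the other pipes are merely
 * locked); any other cdclk change forces a modeset on all pipes via
 * intel_modeset_all_pipes().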
*/ 13529 if (pipe != INVALID_PIPE && 13530 intel_cdclk_needs_cd2x_update(dev_priv, 13531 &dev_priv->cdclk.actual, 13532 &state->cdclk.actual)) { 13533 ret = intel_lock_all_pipes(state); 13534 if (ret < 0) 13535 return ret; 13536 13537 state->cdclk.pipe = pipe; 13538 } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, 13539 &state->cdclk.actual)) { 13540 ret = intel_modeset_all_pipes(state); 13541 if (ret < 0) 13542 return ret; 13543 13544 state->cdclk.pipe = INVALID_PIPE; 13545 } 13546 13547 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", 13548 state->cdclk.logical.cdclk, 13549 state->cdclk.actual.cdclk); 13550 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n", 13551 state->cdclk.logical.voltage_level, 13552 state->cdclk.actual.voltage_level); 13553 } 13554 13555 intel_modeset_clear_plls(state); 13556 13557 if (IS_HASWELL(dev_priv)) 13558 return haswell_mode_set_planes_workaround(state); 13559 13560 return 0; 13561 } 13562 13563 /* 13564 * Handle calculation of various watermark data at the end of the atomic check 13565 * phase. The code here should be run after the per-crtc and per-plane 'check' 13566 * handlers to ensure that all derived state has been updated. 13567 */ 13568 static int calc_watermark_data(struct intel_atomic_state *state) 13569 { 13570 struct drm_device *dev = state->base.dev; 13571 struct drm_i915_private *dev_priv = to_i915(dev); 13572 13573 /* Is there platform-specific watermark information to calculate? */ 13574 if (dev_priv->display.compute_global_watermarks) 13575 return dev_priv->display.compute_global_watermarks(state); 13576 13577 return 0; 13578 } 13579 13580 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 13581 struct intel_crtc_state *new_crtc_state) 13582 { 13583 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 13584 return; 13585 13586 new_crtc_state->base.mode_changed = false; 13587 new_crtc_state->update_pipe = true; 13588 13589 /* 13590 * If we're not doing the full modeset we want to 13591 * keep the current M/N values as they may be 13592 * sufficiently different to the computed values 13593 * to cause problems. 
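 * (For instance, a freshly computed DP link M/N pair can legitimately
 * differ in the low bits from the values currently programmed;
 * reprogramming them outside a full modeset might upset an otherwise
 * stable link.)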
13594 * 13595 * FIXME: should really copy more fuzzy state here 13596 */ 13597 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; 13598 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; 13599 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; 13600 new_crtc_state->has_drrs = old_crtc_state->has_drrs; 13601 } 13602 13603 /** 13604 * intel_atomic_check - validate state object 13605 * @dev: drm device 13606 * @_state: state to validate 13607 */ 13608 static int intel_atomic_check(struct drm_device *dev, 13609 struct drm_atomic_state *_state) 13610 { 13611 struct drm_i915_private *dev_priv = to_i915(dev); 13612 struct intel_atomic_state *state = to_intel_atomic_state(_state); 13613 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13614 struct intel_crtc *crtc; 13615 int ret, i; 13616 bool any_ms = state->cdclk.force_min_cdclk_changed; 13617 13618 /* Catch I915_MODE_FLAG_INHERITED */ 13619 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13620 new_crtc_state, i) { 13621 if (new_crtc_state->base.mode.private_flags != 13622 old_crtc_state->base.mode.private_flags) 13623 new_crtc_state->base.mode_changed = true; 13624 } 13625 13626 ret = drm_atomic_helper_check_modeset(dev, &state->base); 13627 if (ret) 13628 goto fail; 13629 13630 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13631 new_crtc_state, i) { 13632 if (!needs_modeset(new_crtc_state)) 13633 continue; 13634 13635 if (!new_crtc_state->base.enable) { 13636 any_ms = true; 13637 continue; 13638 } 13639 13640 ret = intel_modeset_pipe_config(new_crtc_state); 13641 if (ret) 13642 goto fail; 13643 13644 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 13645 13646 if (needs_modeset(new_crtc_state)) 13647 any_ms = true; 13648 } 13649 13650 ret = drm_dp_mst_atomic_check(&state->base); 13651 if (ret) 13652 goto fail; 13653 13654 if (any_ms) { 13655 ret = intel_modeset_checks(state); 13656 if (ret) 13657 goto fail; 13658 } else { 13659 state->cdclk.logical = dev_priv->cdclk.logical; 13660 } 13661 13662 ret = icl_add_linked_planes(state); 13663 if (ret) 13664 goto fail; 13665 13666 ret = drm_atomic_helper_check_planes(dev, &state->base); 13667 if (ret) 13668 goto fail; 13669 13670 intel_fbc_choose_crtc(dev_priv, state); 13671 ret = calc_watermark_data(state); 13672 if (ret) 13673 goto fail; 13674 13675 ret = intel_bw_atomic_check(state); 13676 if (ret) 13677 goto fail; 13678 13679 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13680 new_crtc_state, i) { 13681 if (!needs_modeset(new_crtc_state) && 13682 !new_crtc_state->update_pipe) 13683 continue; 13684 13685 intel_dump_pipe_config(new_crtc_state, state, 13686 needs_modeset(new_crtc_state) ? 13687 "[modeset]" : "[fastset]"); 13688 } 13689 13690 return 0; 13691 13692 fail: 13693 if (ret == -EDEADLK) 13694 return ret; 13695 13696 /* 13697 * FIXME would probably be nice to know which crtc specifically 13698 * caused the failure, in cases where we can pinpoint it. 
13699 */ 13700 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13701 new_crtc_state, i) 13702 intel_dump_pipe_config(new_crtc_state, state, "[failed]"); 13703 13704 return ret; 13705 } 13706 13707 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 13708 { 13709 return drm_atomic_helper_prepare_planes(state->base.dev, 13710 &state->base); 13711 } 13712 13713 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 13714 { 13715 struct drm_device *dev = crtc->base.dev; 13716 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)]; 13717 13718 if (!vblank->max_vblank_count) 13719 return (u32)drm_crtc_accurate_vblank_count(&crtc->base); 13720 13721 return crtc->base.funcs->get_vblank_counter(&crtc->base); 13722 } 13723 13724 static void intel_update_crtc(struct intel_crtc *crtc, 13725 struct intel_atomic_state *state, 13726 struct intel_crtc_state *old_crtc_state, 13727 struct intel_crtc_state *new_crtc_state) 13728 { 13729 struct drm_device *dev = state->base.dev; 13730 struct drm_i915_private *dev_priv = to_i915(dev); 13731 bool modeset = needs_modeset(new_crtc_state); 13732 struct intel_plane_state *new_plane_state = 13733 intel_atomic_get_new_plane_state(state, 13734 to_intel_plane(crtc->base.primary)); 13735 13736 if (modeset) { 13737 update_scanline_offset(new_crtc_state); 13738 dev_priv->display.crtc_enable(new_crtc_state, state); 13739 13740 /* vblanks work again, re-enable pipe CRC. */ 13741 intel_crtc_enable_pipe_crc(crtc); 13742 } else { 13743 intel_pre_plane_update(old_crtc_state, new_crtc_state); 13744 13745 if (new_crtc_state->update_pipe) 13746 intel_encoders_update_pipe(crtc, new_crtc_state, state); 13747 } 13748 13749 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) 13750 intel_fbc_disable(crtc); 13751 else if (new_plane_state) 13752 intel_fbc_enable(crtc, new_crtc_state, new_plane_state); 13753 13754 intel_begin_crtc_commit(state, crtc); 13755 13756 if (INTEL_GEN(dev_priv) >= 9) 13757 skl_update_planes_on_crtc(state, crtc); 13758 else 13759 i9xx_update_planes_on_crtc(state, crtc); 13760 13761 intel_finish_crtc_commit(state, crtc); 13762 } 13763 13764 static void intel_update_crtcs(struct intel_atomic_state *state) 13765 { 13766 struct intel_crtc *crtc; 13767 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13768 int i; 13769 13770 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 13771 if (!new_crtc_state->base.active) 13772 continue; 13773 13774 intel_update_crtc(crtc, state, old_crtc_state, 13775 new_crtc_state); 13776 } 13777 } 13778 13779 static void skl_update_crtcs(struct intel_atomic_state *state) 13780 { 13781 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13782 struct intel_crtc *crtc; 13783 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13784 unsigned int updated = 0; 13785 bool progress; 13786 enum pipe pipe; 13787 int i; 13788 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 13789 u8 required_slices = state->wm_results.ddb.enabled_slices; 13790 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 13791 13792 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) 13793 /* ignore allocations for crtc's that have been turned off. 
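 * Pre-seeding entries[] with the old allocations of the pipes that stay
 * active means the overlap check in the loop below always compares a new
 * allocation against what is currently programmed in the hardware.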
*/ 13794 if (new_crtc_state->base.active) 13795 entries[i] = old_crtc_state->wm.skl.ddb; 13796 13797 /* If the 2nd DBuf slice is required, enable it here */ 13798 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) 13799 icl_dbuf_slices_update(dev_priv, required_slices); 13800 13801 /* 13802 * Whenever the number of active pipes changes, we need to make sure we 13803 * update the pipes in the right order so that their ddb allocations 13804 * never overlap with each other in between CRTC updates. Otherwise we'll 13805 * cause pipe underruns and other bad stuff. 13806 */ 13807 do { 13808 progress = false; 13809 13810 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 13811 bool vbl_wait = false; 13812 unsigned int cmask = drm_crtc_mask(&crtc->base); 13813 13814 pipe = crtc->pipe; 13815 13816 if (updated & cmask || !new_crtc_state->base.active) 13817 continue; 13818 13819 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 13820 entries, 13821 INTEL_INFO(dev_priv)->num_pipes, i)) 13822 continue; 13823 13824 updated |= cmask; 13825 entries[i] = new_crtc_state->wm.skl.ddb; 13826 13827 /* 13828 * If this is an already active pipe, its DDB changed, 13829 * and this isn't the last pipe that needs updating 13830 * then we need to wait for a vblank to pass for the 13831 * new ddb allocation to take effect. 13832 */ 13833 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 13834 &old_crtc_state->wm.skl.ddb) && 13835 !new_crtc_state->base.active_changed && 13836 state->wm_results.dirty_pipes != updated) 13837 vbl_wait = true; 13838 13839 intel_update_crtc(crtc, state, old_crtc_state, 13840 new_crtc_state); 13841 13842 if (vbl_wait) 13843 intel_wait_for_vblank(dev_priv, pipe); 13844 13845 progress = true; 13846 } 13847 } while (progress); 13848 13849 /* If the 2nd DBuf slice is no longer required, disable it */ 13850 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices) 13851 icl_dbuf_slices_update(dev_priv, required_slices); 13852 } 13853 13854 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 13855 { 13856 struct intel_atomic_state *state, *next; 13857 struct llist_node *freed; 13858 13859 freed = llist_del_all(&dev_priv->atomic_helper.free_list); 13860 llist_for_each_entry_safe(state, next, freed, freed) 13861 drm_atomic_state_put(&state->base); 13862 } 13863 13864 static void intel_atomic_helper_free_state_worker(struct work_struct *work) 13865 { 13866 struct drm_i915_private *dev_priv = 13867 container_of(work, typeof(*dev_priv), atomic_helper.free_work); 13868 13869 intel_atomic_helper_free_state(dev_priv); 13870 } 13871 13872 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 13873 { 13874 struct wait_queue_entry wait_fence, wait_reset; 13875 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); 13876 13877 init_wait_entry(&wait_fence, 0); 13878 init_wait_entry(&wait_reset, 0); 13879 for (;;) { 13880 prepare_to_wait(&intel_state->commit_ready.wait, 13881 &wait_fence, TASK_UNINTERRUPTIBLE); 13882 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 13883 I915_RESET_MODESET), 13884 &wait_reset, TASK_UNINTERRUPTIBLE); 13885 13886 13887 if (i915_sw_fence_done(&intel_state->commit_ready) || 13888 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 13889 break; 13890 13891 schedule(); 13892 } 13893 finish_wait(&intel_state->commit_ready.wait, &wait_fence); 13894 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 13895 I915_RESET_MODESET), 13896
&wait_reset); 13897 } 13898 13899 static void intel_atomic_cleanup_work(struct work_struct *work) 13900 { 13901 struct drm_atomic_state *state = 13902 container_of(work, struct drm_atomic_state, commit_work); 13903 struct drm_i915_private *i915 = to_i915(state->dev); 13904 13905 drm_atomic_helper_cleanup_planes(&i915->drm, state); 13906 drm_atomic_helper_commit_cleanup_done(state); 13907 drm_atomic_state_put(state); 13908 13909 intel_atomic_helper_free_state(i915); 13910 } 13911 13912 static void intel_atomic_commit_tail(struct intel_atomic_state *state) 13913 { 13914 struct drm_device *dev = state->base.dev; 13915 struct drm_i915_private *dev_priv = to_i915(dev); 13916 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 13917 struct intel_crtc *crtc; 13918 u64 put_domains[I915_MAX_PIPES] = {}; 13919 intel_wakeref_t wakeref = 0; 13920 int i; 13921 13922 intel_atomic_commit_fence_wait(state); 13923 13924 drm_atomic_helper_wait_for_dependencies(&state->base); 13925 13926 if (state->modeset) 13927 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 13928 13929 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 13930 if (needs_modeset(new_crtc_state) || 13931 new_crtc_state->update_pipe) { 13932 13933 put_domains[crtc->pipe] = 13934 modeset_get_crtc_power_domains(new_crtc_state); 13935 } 13936 13937 if (!needs_modeset(new_crtc_state)) 13938 continue; 13939 13940 intel_pre_plane_update(old_crtc_state, new_crtc_state); 13941 13942 if (old_crtc_state->base.active) { 13943 intel_crtc_disable_planes(state, crtc); 13944 13945 /* 13946 * We need to disable pipe CRC before disabling the pipe, 13947 * or we race against vblank off. 13948 */ 13949 intel_crtc_disable_pipe_crc(crtc); 13950 13951 dev_priv->display.crtc_disable(old_crtc_state, state); 13952 crtc->active = false; 13953 intel_fbc_disable(crtc); 13954 intel_disable_shared_dpll(old_crtc_state); 13955 13956 /* 13957 * Underruns don't always raise 13958 * interrupts, so check manually. 13959 */ 13960 intel_check_cpu_fifo_underruns(dev_priv); 13961 intel_check_pch_fifo_underruns(dev_priv); 13962 13963 /* FIXME unify this for all platforms */ 13964 if (!new_crtc_state->base.active && 13965 !HAS_GMCH(dev_priv) && 13966 dev_priv->display.initial_watermarks) 13967 dev_priv->display.initial_watermarks(state, 13968 new_crtc_state); 13969 } 13970 } 13971 13972 /* FIXME: Eventually get rid of our crtc->config pointer */ 13973 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 13974 crtc->config = new_crtc_state; 13975 13976 if (state->modeset) { 13977 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); 13978 13979 intel_set_cdclk_pre_plane_update(dev_priv, 13980 &state->cdclk.actual, 13981 &dev_priv->cdclk.actual, 13982 state->cdclk.pipe); 13983 13984 /* 13985 * SKL workaround: bspec recommends we disable the SAGV when we 13986 * have more than one pipe enabled 13987 */ 13988 if (!intel_can_enable_sagv(state)) 13989 intel_disable_sagv(dev_priv); 13990 13991 intel_modeset_verify_disabled(dev_priv, state); 13992 } 13993 13994 /* Complete the events for pipes that have now been disabled */ 13995 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 13996 bool modeset = needs_modeset(new_crtc_state); 13997 13998 /* Complete events for the now-disabled pipes here. */
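/*
 * A disabled pipe won't generate any further vblank interrupts, so the
 * pending event can't complete through the normal vblank path and is
 * sent by hand under the event lock instead.
 */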
13999 if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) { 14000 spin_lock_irq(&dev->event_lock); 14001 drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event); 14002 spin_unlock_irq(&dev->event_lock); 14003 14004 new_crtc_state->base.event = NULL; 14005 } 14006 } 14007 14008 if (state->modeset) 14009 intel_encoders_update_prepare(state); 14010 14011 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 14012 dev_priv->display.update_crtcs(state); 14013 14014 if (state->modeset) { 14015 intel_encoders_update_complete(state); 14016 14017 intel_set_cdclk_post_plane_update(dev_priv, 14018 &state->cdclk.actual, 14019 &dev_priv->cdclk.actual, 14020 state->cdclk.pipe); 14021 } 14022 14023 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 14024 * already, but still need the state for the delayed optimization. To 14025 * fix this: 14026 * - wrap the optimization/post_plane_update stuff into a per-crtc work. 14027 * - schedule that vblank worker _before_ calling hw_done 14028 * - at the start of commit_tail, cancel it _synchronously 14029 * - switch over to the vblank wait helper in the core after that since 14030 * we don't need our special handling any more. 14031 */ 14032 drm_atomic_helper_wait_for_flip_done(dev, &state->base); 14033 14034 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14035 if (new_crtc_state->base.active && 14036 !needs_modeset(new_crtc_state) && 14037 (new_crtc_state->base.color_mgmt_changed || 14038 new_crtc_state->update_pipe)) 14039 intel_color_load_luts(new_crtc_state); 14040 } 14041 14042 /* 14043 * Now that the vblank has passed, we can go ahead and program the 14044 * optimal watermarks on platforms that need two-step watermark 14045 * programming. 14046 * 14047 * TODO: Move this (and other cleanup) to an async worker eventually. 14048 */ 14049 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14050 if (dev_priv->display.optimize_watermarks) 14051 dev_priv->display.optimize_watermarks(state, 14052 new_crtc_state); 14053 } 14054 14055 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 14056 intel_post_plane_update(old_crtc_state); 14057 14058 if (put_domains[i]) 14059 modeset_put_power_domains(dev_priv, put_domains[i]); 14060 14061 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); 14062 } 14063 14064 if (state->modeset) 14065 intel_verify_planes(state); 14066 14067 if (state->modeset && intel_can_enable_sagv(state)) 14068 intel_enable_sagv(dev_priv); 14069 14070 drm_atomic_helper_commit_hw_done(&state->base); 14071 14072 if (state->modeset) { 14073 /* As one of the primary mmio accessors, KMS has a high 14074 * likelihood of triggering bugs in unclaimed access. After we 14075 * finish modesetting, see if an error has been flagged, and if 14076 * so enable debugging for the next modeset - and hope we catch 14077 * the culprit. 14078 */ 14079 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 14080 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); 14081 } 14082 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 14083 14084 /* 14085 * Defer the cleanup of the old state to a separate worker to not 14086 * impede the current task (userspace for blocking modesets) that 14087 * is executed inline.
For out-of-line asynchronous modesets/flips, 14088 * deferring to a new worker seems overkill, but we would place a 14089 * schedule point (cond_resched()) here anyway to keep latencies 14090 * down. 14091 */ 14092 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 14093 queue_work(system_highpri_wq, &state->base.commit_work); 14094 } 14095 14096 static void intel_atomic_commit_work(struct work_struct *work) 14097 { 14098 struct intel_atomic_state *state = 14099 container_of(work, struct intel_atomic_state, base.commit_work); 14100 14101 intel_atomic_commit_tail(state); 14102 } 14103 14104 static int __i915_sw_fence_call 14105 intel_atomic_commit_ready(struct i915_sw_fence *fence, 14106 enum i915_sw_fence_notify notify) 14107 { 14108 struct intel_atomic_state *state = 14109 container_of(fence, struct intel_atomic_state, commit_ready); 14110 14111 switch (notify) { 14112 case FENCE_COMPLETE: 14113 /* we do blocking waits in the worker, nothing to do here */ 14114 break; 14115 case FENCE_FREE: 14116 { 14117 struct intel_atomic_helper *helper = 14118 &to_i915(state->base.dev)->atomic_helper; 14119 14120 if (llist_add(&state->freed, &helper->free_list)) 14121 schedule_work(&helper->free_work); 14122 break; 14123 } 14124 } 14125 14126 return NOTIFY_DONE; 14127 } 14128 14129 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 14130 { 14131 struct intel_plane_state *old_plane_state, *new_plane_state; 14132 struct intel_plane *plane; 14133 int i; 14134 14135 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 14136 new_plane_state, i) 14137 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb), 14138 to_intel_frontbuffer(new_plane_state->base.fb), 14139 plane->frontbuffer_bit); 14140 } 14141 14142 static int intel_atomic_commit(struct drm_device *dev, 14143 struct drm_atomic_state *_state, 14144 bool nonblock) 14145 { 14146 struct intel_atomic_state *state = to_intel_atomic_state(_state); 14147 struct drm_i915_private *dev_priv = to_i915(dev); 14148 int ret = 0; 14149 14150 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 14151 14152 drm_atomic_state_get(&state->base); 14153 i915_sw_fence_init(&state->commit_ready, 14154 intel_atomic_commit_ready); 14155 14156 /* 14157 * The intel_legacy_cursor_update() fast path takes care 14158 * of avoiding the vblank waits for simple cursor 14159 * movement and flips. For cursor on/off and size changes, 14160 * we want to perform the vblank waits so that watermark 14161 * updates happen during the correct frames. Gen9+ have 14162 * double buffered watermarks and so shouldn't need this. 14163 * 14164 * Unset state->legacy_cursor_update before the call to 14165 * drm_atomic_helper_setup_commit() because otherwise 14166 * drm_atomic_helper_wait_for_flip_done() is a noop and 14167 * we get FIFO underruns because we didn't wait 14168 * for vblank. 14169 * 14170 * FIXME doing watermarks and fb cleanup from a vblank worker 14171 * (assuming we had any) would solve these problems. 
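 *
 * In short: on pre-gen9, if any crtc in a legacy cursor update still
 * needs a post-vblank watermark update, the fast path is abandoned below
 * by clearing legacy_cursor_update, and the commit waits for vblank as
 * usual.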
14172 */ 14173 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) { 14174 struct intel_crtc_state *new_crtc_state; 14175 struct intel_crtc *crtc; 14176 int i; 14177 14178 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 14179 if (new_crtc_state->wm.need_postvbl_update || 14180 new_crtc_state->update_wm_post) 14181 state->base.legacy_cursor_update = false; 14182 } 14183 14184 ret = intel_atomic_prepare_commit(state); 14185 if (ret) { 14186 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); 14187 i915_sw_fence_commit(&state->commit_ready); 14188 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 14189 return ret; 14190 } 14191 14192 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 14193 if (!ret) 14194 ret = drm_atomic_helper_swap_state(&state->base, true); 14195 14196 if (ret) { 14197 i915_sw_fence_commit(&state->commit_ready); 14198 14199 drm_atomic_helper_cleanup_planes(dev, &state->base); 14200 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 14201 return ret; 14202 } 14203 dev_priv->wm.distrust_bios_wm = false; 14204 intel_shared_dpll_swap_state(state); 14205 intel_atomic_track_fbs(state); 14206 14207 if (state->modeset) { 14208 memcpy(dev_priv->min_cdclk, state->min_cdclk, 14209 sizeof(state->min_cdclk)); 14210 memcpy(dev_priv->min_voltage_level, state->min_voltage_level, 14211 sizeof(state->min_voltage_level)); 14212 dev_priv->active_crtcs = state->active_crtcs; 14213 dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; 14214 14215 intel_cdclk_swap_state(state); 14216 } 14217 14218 drm_atomic_state_get(&state->base); 14219 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 14220 14221 i915_sw_fence_commit(&state->commit_ready); 14222 if (nonblock && state->modeset) { 14223 queue_work(dev_priv->modeset_wq, &state->base.commit_work); 14224 } else if (nonblock) { 14225 queue_work(system_unbound_wq, &state->base.commit_work); 14226 } else { 14227 if (state->modeset) 14228 flush_workqueue(dev_priv->modeset_wq); 14229 intel_atomic_commit_tail(state); 14230 } 14231 14232 return 0; 14233 } 14234 14235 struct wait_rps_boost { 14236 struct wait_queue_entry wait; 14237 14238 struct drm_crtc *crtc; 14239 struct i915_request *request; 14240 }; 14241 14242 static int do_rps_boost(struct wait_queue_entry *_wait, 14243 unsigned mode, int sync, void *key) 14244 { 14245 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 14246 struct i915_request *rq = wait->request; 14247 14248 /* 14249 * If we missed the vblank, but the request is already running it 14250 * is reasonable to assume that it will complete before the next 14251 * vblank without our intervention, so leave RPS alone. 
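 * Hence the started() check below: only requests that haven't begun
 * executing yet are given an RPS boost.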
14252 */ 14253 if (!i915_request_started(rq)) 14254 gen6_rps_boost(rq); 14255 i915_request_put(rq); 14256 14257 drm_crtc_vblank_put(wait->crtc); 14258 14259 list_del(&wait->wait.entry); 14260 kfree(wait); 14261 return 1; 14262 } 14263 14264 static void add_rps_boost_after_vblank(struct drm_crtc *crtc, 14265 struct dma_fence *fence) 14266 { 14267 struct wait_rps_boost *wait; 14268 14269 if (!dma_fence_is_i915(fence)) 14270 return; 14271 14272 if (INTEL_GEN(to_i915(crtc->dev)) < 6) 14273 return; 14274 14275 if (drm_crtc_vblank_get(crtc)) 14276 return; 14277 14278 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 14279 if (!wait) { 14280 drm_crtc_vblank_put(crtc); 14281 return; 14282 } 14283 14284 wait->request = to_request(dma_fence_get(fence)); 14285 wait->crtc = crtc; 14286 14287 wait->wait.func = do_rps_boost; 14288 wait->wait.flags = 0; 14289 14290 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 14291 } 14292 14293 static int intel_plane_pin_fb(struct intel_plane_state *plane_state) 14294 { 14295 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 14296 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 14297 struct drm_framebuffer *fb = plane_state->base.fb; 14298 struct i915_vma *vma; 14299 14300 if (plane->id == PLANE_CURSOR && 14301 INTEL_INFO(dev_priv)->display.cursor_needs_physical) { 14302 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 14303 const int align = intel_cursor_alignment(dev_priv); 14304 int err; 14305 14306 err = i915_gem_object_attach_phys(obj, align); 14307 if (err) 14308 return err; 14309 } 14310 14311 vma = intel_pin_and_fence_fb_obj(fb, 14312 &plane_state->view, 14313 intel_plane_uses_fence(plane_state), 14314 &plane_state->flags); 14315 if (IS_ERR(vma)) 14316 return PTR_ERR(vma); 14317 14318 plane_state->vma = vma; 14319 14320 return 0; 14321 } 14322 14323 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 14324 { 14325 struct i915_vma *vma; 14326 14327 vma = fetch_and_zero(&old_plane_state->vma); 14328 if (vma) 14329 intel_unpin_fb_vma(vma, old_plane_state->flags); 14330 } 14331 14332 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) 14333 { 14334 struct i915_sched_attr attr = { 14335 .priority = I915_PRIORITY_DISPLAY, 14336 }; 14337 14338 i915_gem_object_wait_priority(obj, 0, &attr); 14339 } 14340 14341 /** 14342 * intel_prepare_plane_fb - Prepare fb for usage on plane 14343 * @plane: drm plane to prepare for 14344 * @new_state: the plane state being prepared 14345 * 14346 * Prepares a framebuffer for usage on a display plane. Generally this 14347 * involves pinning the underlying object and updating the frontbuffer tracking 14348 * bits. Some older platforms need special physical address handling for 14349 * cursor planes. 14350 * 14351 * Must be called with struct_mutex held. 14352 * 14353 * Returns 0 on success, negative error code on failure. 
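 *
 * Note that this also hooks the plane's fences up to the atomic commit:
 * both the explicit fence on @new_state and, failing that, the implicit
 * fences on the backing object are awaited via the state's commit_ready
 * fence before the commit is allowed to run.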
14354 */ 14355 int 14356 intel_prepare_plane_fb(struct drm_plane *plane, 14357 struct drm_plane_state *new_state) 14358 { 14359 struct intel_atomic_state *intel_state = 14360 to_intel_atomic_state(new_state->state); 14361 struct drm_i915_private *dev_priv = to_i915(plane->dev); 14362 struct drm_framebuffer *fb = new_state->fb; 14363 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 14364 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); 14365 int ret; 14366 14367 if (old_obj) { 14368 struct intel_crtc_state *crtc_state = 14369 intel_atomic_get_new_crtc_state(intel_state, 14370 to_intel_crtc(plane->state->crtc)); 14371 14372 /* Big Hammer, we also need to ensure that any pending 14373 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 14374 * current scanout is retired before unpinning the old 14375 * framebuffer. Note that we rely on userspace rendering 14376 * into the buffer attached to the pipe they are waiting 14377 * on. If not, userspace generates a GPU hang with IPEHR 14378 * pointing to the MI_WAIT_FOR_EVENT. 14379 * 14380 * This should only fail upon a hung GPU, in which case we 14381 * can safely continue. 14382 */ 14383 if (needs_modeset(crtc_state)) { 14384 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 14385 old_obj->base.resv, NULL, 14386 false, 0, 14387 GFP_KERNEL); 14388 if (ret < 0) 14389 return ret; 14390 } 14391 } 14392 14393 if (new_state->fence) { /* explicit fencing */ 14394 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, 14395 new_state->fence, 14396 I915_FENCE_TIMEOUT, 14397 GFP_KERNEL); 14398 if (ret < 0) 14399 return ret; 14400 } 14401 14402 if (!obj) 14403 return 0; 14404 14405 ret = i915_gem_object_pin_pages(obj); 14406 if (ret) 14407 return ret; 14408 14409 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 14410 if (ret) { 14411 i915_gem_object_unpin_pages(obj); 14412 return ret; 14413 } 14414 14415 ret = intel_plane_pin_fb(to_intel_plane_state(new_state)); 14416 14417 mutex_unlock(&dev_priv->drm.struct_mutex); 14418 i915_gem_object_unpin_pages(obj); 14419 if (ret) 14420 return ret; 14421 14422 fb_obj_bump_render_priority(obj); 14423 intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB); 14424 14425 if (!new_state->fence) { /* implicit fencing */ 14426 struct dma_fence *fence; 14427 14428 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 14429 obj->base.resv, NULL, 14430 false, I915_FENCE_TIMEOUT, 14431 GFP_KERNEL); 14432 if (ret < 0) 14433 return ret; 14434 14435 fence = dma_resv_get_excl_rcu(obj->base.resv); 14436 if (fence) { 14437 add_rps_boost_after_vblank(new_state->crtc, fence); 14438 dma_fence_put(fence); 14439 } 14440 } else { 14441 add_rps_boost_after_vblank(new_state->crtc, new_state->fence); 14442 } 14443 14444 /* 14445 * We declare pageflips to be interactive and so merit a small bias 14446 * towards upclocking to deliver the frame on time. By only changing 14447 * the RPS thresholds to sample more regularly and aim for higher 14448 * clocks we can hopefully deliver low power workloads (like kodi) 14449 * that are not quite steady state without resorting to forcing 14450 * maximum clocks following a vblank miss (see do_rps_boost()).
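 * The rps_interactive flag set below is dropped again in
 * intel_cleanup_plane_fb() once the flip has been cleaned up.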
14451 */ 14452 if (!intel_state->rps_interactive) { 14453 intel_rps_mark_interactive(dev_priv, true); 14454 intel_state->rps_interactive = true; 14455 } 14456 14457 return 0; 14458 } 14459 14460 /** 14461 * intel_cleanup_plane_fb - Cleans up an fb after plane use 14462 * @plane: drm plane to clean up for 14463 * @old_state: the state from the previous modeset 14464 * 14465 * Cleans up a framebuffer that has just been removed from a plane. 14466 * 14467 * Acquires struct_mutex internally; must not be called with struct_mutex held. 14468 */ 14469 void 14470 intel_cleanup_plane_fb(struct drm_plane *plane, 14471 struct drm_plane_state *old_state) 14472 { 14473 struct intel_atomic_state *intel_state = 14474 to_intel_atomic_state(old_state->state); 14475 struct drm_i915_private *dev_priv = to_i915(plane->dev); 14476 14477 if (intel_state->rps_interactive) { 14478 intel_rps_mark_interactive(dev_priv, false); 14479 intel_state->rps_interactive = false; 14480 } 14481 14482 /* Should only be called after a successful intel_prepare_plane_fb()! */ 14483 mutex_lock(&dev_priv->drm.struct_mutex); 14484 intel_plane_unpin_fb(to_intel_plane_state(old_state)); 14485 mutex_unlock(&dev_priv->drm.struct_mutex); 14486 } 14487 14488 int 14489 skl_max_scale(const struct intel_crtc_state *crtc_state, 14490 u32 pixel_format) 14491 { 14492 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 14493 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14494 int max_scale, mult; 14495 int crtc_clock, max_dotclk, tmpclk1, tmpclk2; 14496 14497 if (!crtc_state->base.enable) 14498 return DRM_PLANE_HELPER_NO_SCALING; 14499 14500 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; 14501 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk; 14502 14503 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) 14504 max_dotclk *= 2; 14505 14506 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock)) 14507 return DRM_PLANE_HELPER_NO_SCALING; 14508 14509 /* 14510 * The skl max scale factor is the lower of: 14511 * just under 3x (2x for planar YUV formats) in 16.16 fixed point, 14512 * where the -1 keeps the ratio strictly below the 3x/2x limit, 14513 * or 14514 * cdclk / crtc_clock, also as a 16.16 fixed point ratio. 14515 */ mult = is_planar_yuv_format(pixel_format) ?
2 : 3; 14516 tmpclk1 = (1 << 16) * mult - 1; 14517 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock); 14518 max_scale = min(tmpclk1, tmpclk2); 14519 14520 return max_scale; 14521 } 14522 14523 static void intel_begin_crtc_commit(struct intel_atomic_state *state, 14524 struct intel_crtc *crtc) 14525 { 14526 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14527 struct intel_crtc_state *old_crtc_state = 14528 intel_atomic_get_old_crtc_state(state, crtc); 14529 struct intel_crtc_state *new_crtc_state = 14530 intel_atomic_get_new_crtc_state(state, crtc); 14531 bool modeset = needs_modeset(new_crtc_state); 14532 14533 /* Perform vblank evasion around commit operation */ 14534 intel_pipe_update_start(new_crtc_state); 14535 14536 if (modeset) 14537 goto out; 14538 14539 if (new_crtc_state->base.color_mgmt_changed || 14540 new_crtc_state->update_pipe) 14541 intel_color_commit(new_crtc_state); 14542 14543 if (new_crtc_state->update_pipe) 14544 intel_update_pipe_config(old_crtc_state, new_crtc_state); 14545 else if (INTEL_GEN(dev_priv) >= 9) 14546 skl_detach_scalers(new_crtc_state); 14547 14548 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 14549 bdw_set_pipemisc(new_crtc_state); 14550 14551 out: 14552 if (dev_priv->display.atomic_update_watermarks) 14553 dev_priv->display.atomic_update_watermarks(state, 14554 new_crtc_state); 14555 } 14556 14557 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 14558 struct intel_crtc_state *crtc_state) 14559 { 14560 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14561 14562 if (!IS_GEN(dev_priv, 2)) 14563 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 14564 14565 if (crtc_state->has_pch_encoder) { 14566 enum pipe pch_transcoder = 14567 intel_crtc_pch_transcoder(crtc); 14568 14569 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 14570 } 14571 } 14572 14573 static void intel_finish_crtc_commit(struct intel_atomic_state *state, 14574 struct intel_crtc *crtc) 14575 { 14576 struct intel_crtc_state *old_crtc_state = 14577 intel_atomic_get_old_crtc_state(state, crtc); 14578 struct intel_crtc_state *new_crtc_state = 14579 intel_atomic_get_new_crtc_state(state, crtc); 14580 14581 intel_pipe_update_end(new_crtc_state); 14582 14583 if (new_crtc_state->update_pipe && 14584 !needs_modeset(new_crtc_state) && 14585 old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) 14586 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 14587 } 14588 14589 /** 14590 * intel_plane_destroy - destroy a plane 14591 * @plane: plane to destroy 14592 * 14593 * Common destruction function for all types of planes (primary, cursor, 14594 * sprite). 
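 *
 * Typically wired up as the &drm_plane_funcs.destroy hook, as the
 * i965/i8xx/cursor plane funcs tables below do.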
14595 */ 14596 void intel_plane_destroy(struct drm_plane *plane) 14597 { 14598 drm_plane_cleanup(plane); 14599 kfree(to_intel_plane(plane)); 14600 } 14601 14602 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, 14603 u32 format, u64 modifier) 14604 { 14605 switch (modifier) { 14606 case DRM_FORMAT_MOD_LINEAR: 14607 case I915_FORMAT_MOD_X_TILED: 14608 break; 14609 default: 14610 return false; 14611 } 14612 14613 switch (format) { 14614 case DRM_FORMAT_C8: 14615 case DRM_FORMAT_RGB565: 14616 case DRM_FORMAT_XRGB1555: 14617 case DRM_FORMAT_XRGB8888: 14618 return modifier == DRM_FORMAT_MOD_LINEAR || 14619 modifier == I915_FORMAT_MOD_X_TILED; 14620 default: 14621 return false; 14622 } 14623 } 14624 14625 static bool i965_plane_format_mod_supported(struct drm_plane *_plane, 14626 u32 format, u64 modifier) 14627 { 14628 switch (modifier) { 14629 case DRM_FORMAT_MOD_LINEAR: 14630 case I915_FORMAT_MOD_X_TILED: 14631 break; 14632 default: 14633 return false; 14634 } 14635 14636 switch (format) { 14637 case DRM_FORMAT_C8: 14638 case DRM_FORMAT_RGB565: 14639 case DRM_FORMAT_XRGB8888: 14640 case DRM_FORMAT_XBGR8888: 14641 case DRM_FORMAT_XRGB2101010: 14642 case DRM_FORMAT_XBGR2101010: 14643 return modifier == DRM_FORMAT_MOD_LINEAR || 14644 modifier == I915_FORMAT_MOD_X_TILED; 14645 default: 14646 return false; 14647 } 14648 } 14649 14650 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, 14651 u32 format, u64 modifier) 14652 { 14653 return modifier == DRM_FORMAT_MOD_LINEAR && 14654 format == DRM_FORMAT_ARGB8888; 14655 } 14656 14657 static const struct drm_plane_funcs i965_plane_funcs = { 14658 .update_plane = drm_atomic_helper_update_plane, 14659 .disable_plane = drm_atomic_helper_disable_plane, 14660 .destroy = intel_plane_destroy, 14661 .atomic_duplicate_state = intel_plane_duplicate_state, 14662 .atomic_destroy_state = intel_plane_destroy_state, 14663 .format_mod_supported = i965_plane_format_mod_supported, 14664 }; 14665 14666 static const struct drm_plane_funcs i8xx_plane_funcs = { 14667 .update_plane = drm_atomic_helper_update_plane, 14668 .disable_plane = drm_atomic_helper_disable_plane, 14669 .destroy = intel_plane_destroy, 14670 .atomic_duplicate_state = intel_plane_duplicate_state, 14671 .atomic_destroy_state = intel_plane_destroy_state, 14672 .format_mod_supported = i8xx_plane_format_mod_supported, 14673 }; 14674 14675 static int 14676 intel_legacy_cursor_update(struct drm_plane *plane, 14677 struct drm_crtc *crtc, 14678 struct drm_framebuffer *fb, 14679 int crtc_x, int crtc_y, 14680 unsigned int crtc_w, unsigned int crtc_h, 14681 u32 src_x, u32 src_y, 14682 u32 src_w, u32 src_h, 14683 struct drm_modeset_acquire_ctx *ctx) 14684 { 14685 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 14686 struct drm_plane_state *old_plane_state, *new_plane_state; 14687 struct intel_plane *intel_plane = to_intel_plane(plane); 14688 struct intel_crtc_state *crtc_state = 14689 to_intel_crtc_state(crtc->state); 14690 struct intel_crtc_state *new_crtc_state; 14691 int ret; 14692 14693 /* 14694 * When crtc is inactive or there is a modeset pending, 14695 * wait for it to complete in the slowpath 14696 */ 14697 if (!crtc_state->base.active || needs_modeset(crtc_state) || 14698 crtc_state->update_pipe) 14699 goto slow; 14700 14701 old_plane_state = plane->state; 14702 /* 14703 * Don't do an async update if there is an outstanding commit modifying 14704 * the plane. 
This prevents our async update's changes from getting 14705 * overridden by a previous synchronous update's state. 14706 */ 14707 if (old_plane_state->commit && 14708 !try_wait_for_completion(&old_plane_state->commit->hw_done)) 14709 goto slow; 14710 14711 /* 14712 * If any parameters change that may affect watermarks, 14713 * take the slowpath. Only changing fb or position should be 14714 * in the fastpath. 14715 */ 14716 if (old_plane_state->crtc != crtc || 14717 old_plane_state->src_w != src_w || 14718 old_plane_state->src_h != src_h || 14719 old_plane_state->crtc_w != crtc_w || 14720 old_plane_state->crtc_h != crtc_h || 14721 !old_plane_state->fb != !fb) 14722 goto slow; 14723 14724 new_plane_state = intel_plane_duplicate_state(plane); 14725 if (!new_plane_state) 14726 return -ENOMEM; 14727 14728 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc)); 14729 if (!new_crtc_state) { 14730 ret = -ENOMEM; 14731 goto out_free; 14732 } 14733 14734 drm_atomic_set_fb_for_plane(new_plane_state, fb); 14735 14736 new_plane_state->src_x = src_x; 14737 new_plane_state->src_y = src_y; 14738 new_plane_state->src_w = src_w; 14739 new_plane_state->src_h = src_h; 14740 new_plane_state->crtc_x = crtc_x; 14741 new_plane_state->crtc_y = crtc_y; 14742 new_plane_state->crtc_w = crtc_w; 14743 new_plane_state->crtc_h = crtc_h; 14744 14745 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, 14746 to_intel_plane_state(old_plane_state), 14747 to_intel_plane_state(new_plane_state)); 14748 if (ret) 14749 goto out_free; 14750 14751 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 14752 if (ret) 14753 goto out_free; 14754 14755 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state)); 14756 if (ret) 14757 goto out_unlock; 14758 14759 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP); 14760 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb), 14761 to_intel_frontbuffer(fb), 14762 intel_plane->frontbuffer_bit); 14763 14764 /* Swap plane state */ 14765 plane->state = new_plane_state; 14766 14767 /* 14768 * We cannot swap crtc_state as it may be in use by an atomic commit or 14769 * page flip that's running simultaneously. If we swap crtc_state and 14770 * destroy the old state, we will cause a use-after-free there. 14771 * 14772 * Only update active_planes, which is needed for our internal 14773 * bookkeeping. Either value will do the right thing when updating 14774 * planes atomically. If the cursor was part of the atomic update then 14775 * we would have taken the slowpath. 
14776 */ 14777 crtc_state->active_planes = new_crtc_state->active_planes; 14778 14779 if (plane->state->visible) 14780 intel_update_plane(intel_plane, crtc_state, 14781 to_intel_plane_state(plane->state)); 14782 else 14783 intel_disable_plane(intel_plane, crtc_state); 14784 14785 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state)); 14786 14787 out_unlock: 14788 mutex_unlock(&dev_priv->drm.struct_mutex); 14789 out_free: 14790 if (new_crtc_state) 14791 intel_crtc_destroy_state(crtc, &new_crtc_state->base); 14792 if (ret) 14793 intel_plane_destroy_state(plane, new_plane_state); 14794 else 14795 intel_plane_destroy_state(plane, old_plane_state); 14796 return ret; 14797 14798 slow: 14799 return drm_atomic_helper_update_plane(plane, crtc, fb, 14800 crtc_x, crtc_y, crtc_w, crtc_h, 14801 src_x, src_y, src_w, src_h, ctx); 14802 } 14803 14804 static const struct drm_plane_funcs intel_cursor_plane_funcs = { 14805 .update_plane = intel_legacy_cursor_update, 14806 .disable_plane = drm_atomic_helper_disable_plane, 14807 .destroy = intel_plane_destroy, 14808 .atomic_duplicate_state = intel_plane_duplicate_state, 14809 .atomic_destroy_state = intel_plane_destroy_state, 14810 .format_mod_supported = intel_cursor_format_mod_supported, 14811 }; 14812 14813 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, 14814 enum i9xx_plane_id i9xx_plane) 14815 { 14816 if (!HAS_FBC(dev_priv)) 14817 return false; 14818 14819 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 14820 return i9xx_plane == PLANE_A; /* tied to pipe A */ 14821 else if (IS_IVYBRIDGE(dev_priv)) 14822 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B || 14823 i9xx_plane == PLANE_C; 14824 else if (INTEL_GEN(dev_priv) >= 4) 14825 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B; 14826 else 14827 return i9xx_plane == PLANE_A; 14828 } 14829 14830 static struct intel_plane * 14831 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) 14832 { 14833 struct intel_plane *plane; 14834 const struct drm_plane_funcs *plane_funcs; 14835 unsigned int supported_rotations; 14836 unsigned int possible_crtcs; 14837 const u64 *modifiers; 14838 const u32 *formats; 14839 int num_formats; 14840 int ret; 14841 14842 if (INTEL_GEN(dev_priv) >= 9) 14843 return skl_universal_plane_create(dev_priv, pipe, 14844 PLANE_PRIMARY); 14845 14846 plane = intel_plane_alloc(); 14847 if (IS_ERR(plane)) 14848 return plane; 14849 14850 plane->pipe = pipe; 14851 /* 14852 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 14853 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
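	 *
	 * Concretely (assuming the usual two pipes): the "!pipe" expression
	 * below maps pipe A -> plane B and pipe B -> plane A, so the
	 * FBC-capable plane A ends up feeding pipe B.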
14854 */ 14855 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) 14856 plane->i9xx_plane = (enum i9xx_plane_id) !pipe; 14857 else 14858 plane->i9xx_plane = (enum i9xx_plane_id) pipe; 14859 plane->id = PLANE_PRIMARY; 14860 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); 14861 14862 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); 14863 if (plane->has_fbc) { 14864 struct intel_fbc *fbc = &dev_priv->fbc; 14865 14866 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; 14867 } 14868 14869 if (INTEL_GEN(dev_priv) >= 4) { 14870 formats = i965_primary_formats; 14871 num_formats = ARRAY_SIZE(i965_primary_formats); 14872 modifiers = i9xx_format_modifiers; 14873 14874 plane->max_stride = i9xx_plane_max_stride; 14875 plane->update_plane = i9xx_update_plane; 14876 plane->disable_plane = i9xx_disable_plane; 14877 plane->get_hw_state = i9xx_plane_get_hw_state; 14878 plane->check_plane = i9xx_plane_check; 14879 14880 plane_funcs = &i965_plane_funcs; 14881 } else { 14882 formats = i8xx_primary_formats; 14883 num_formats = ARRAY_SIZE(i8xx_primary_formats); 14884 modifiers = i9xx_format_modifiers; 14885 14886 plane->max_stride = i9xx_plane_max_stride; 14887 plane->update_plane = i9xx_update_plane; 14888 plane->disable_plane = i9xx_disable_plane; 14889 plane->get_hw_state = i9xx_plane_get_hw_state; 14890 plane->check_plane = i9xx_plane_check; 14891 14892 plane_funcs = &i8xx_plane_funcs; 14893 } 14894 14895 possible_crtcs = BIT(pipe); 14896 14897 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 14898 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 14899 possible_crtcs, plane_funcs, 14900 formats, num_formats, modifiers, 14901 DRM_PLANE_TYPE_PRIMARY, 14902 "primary %c", pipe_name(pipe)); 14903 else 14904 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 14905 possible_crtcs, plane_funcs, 14906 formats, num_formats, modifiers, 14907 DRM_PLANE_TYPE_PRIMARY, 14908 "plane %c", 14909 plane_name(plane->i9xx_plane)); 14910 if (ret) 14911 goto fail; 14912 14913 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 14914 supported_rotations = 14915 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 14916 DRM_MODE_REFLECT_X; 14917 } else if (INTEL_GEN(dev_priv) >= 4) { 14918 supported_rotations = 14919 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 14920 } else { 14921 supported_rotations = DRM_MODE_ROTATE_0; 14922 } 14923 14924 if (INTEL_GEN(dev_priv) >= 4) 14925 drm_plane_create_rotation_property(&plane->base, 14926 DRM_MODE_ROTATE_0, 14927 supported_rotations); 14928 14929 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); 14930 14931 return plane; 14932 14933 fail: 14934 intel_plane_free(plane); 14935 14936 return ERR_PTR(ret); 14937 } 14938 14939 static struct intel_plane * 14940 intel_cursor_plane_create(struct drm_i915_private *dev_priv, 14941 enum pipe pipe) 14942 { 14943 unsigned int possible_crtcs; 14944 struct intel_plane *cursor; 14945 int ret; 14946 14947 cursor = intel_plane_alloc(); 14948 if (IS_ERR(cursor)) 14949 return cursor; 14950 14951 cursor->pipe = pipe; 14952 cursor->i9xx_plane = (enum i9xx_plane_id) pipe; 14953 cursor->id = PLANE_CURSOR; 14954 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); 14955 14956 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 14957 cursor->max_stride = i845_cursor_max_stride; 14958 cursor->update_plane = i845_update_cursor; 14959 cursor->disable_plane = i845_disable_cursor; 14960 cursor->get_hw_state = i845_cursor_get_hw_state; 14961 cursor->check_plane = i845_check_cursor; 14962 } else { 14963 
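		/*
		 * Everything other than 845G/865G uses the i9xx-style
		 * cursor hooks set up below.
		 */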
cursor->max_stride = i9xx_cursor_max_stride; 14964 cursor->update_plane = i9xx_update_cursor; 14965 cursor->disable_plane = i9xx_disable_cursor; 14966 cursor->get_hw_state = i9xx_cursor_get_hw_state; 14967 cursor->check_plane = i9xx_check_cursor; 14968 } 14969 14970 cursor->cursor.base = ~0; 14971 cursor->cursor.cntl = ~0; 14972 14973 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 14974 cursor->cursor.size = ~0; 14975 14976 possible_crtcs = BIT(pipe); 14977 14978 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 14979 possible_crtcs, &intel_cursor_plane_funcs, 14980 intel_cursor_formats, 14981 ARRAY_SIZE(intel_cursor_formats), 14982 cursor_format_modifiers, 14983 DRM_PLANE_TYPE_CURSOR, 14984 "cursor %c", pipe_name(pipe)); 14985 if (ret) 14986 goto fail; 14987 14988 if (INTEL_GEN(dev_priv) >= 4) 14989 drm_plane_create_rotation_property(&cursor->base, 14990 DRM_MODE_ROTATE_0, 14991 DRM_MODE_ROTATE_0 | 14992 DRM_MODE_ROTATE_180); 14993 14994 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 14995 14996 return cursor; 14997 14998 fail: 14999 intel_plane_free(cursor); 15000 15001 return ERR_PTR(ret); 15002 } 15003 15004 static void intel_crtc_init_scalers(struct intel_crtc *crtc, 15005 struct intel_crtc_state *crtc_state) 15006 { 15007 struct intel_crtc_scaler_state *scaler_state = 15008 &crtc_state->scaler_state; 15009 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 15010 int i; 15011 15012 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe]; 15013 if (!crtc->num_scalers) 15014 return; 15015 15016 for (i = 0; i < crtc->num_scalers; i++) { 15017 struct intel_scaler *scaler = &scaler_state->scalers[i]; 15018 15019 scaler->in_use = 0; 15020 scaler->mode = 0; 15021 } 15022 15023 scaler_state->scaler_id = -1; 15024 } 15025 15026 #define INTEL_CRTC_FUNCS \ 15027 .gamma_set = drm_atomic_helper_legacy_gamma_set, \ 15028 .set_config = drm_atomic_helper_set_config, \ 15029 .destroy = intel_crtc_destroy, \ 15030 .page_flip = drm_atomic_helper_page_flip, \ 15031 .atomic_duplicate_state = intel_crtc_duplicate_state, \ 15032 .atomic_destroy_state = intel_crtc_destroy_state, \ 15033 .set_crc_source = intel_crtc_set_crc_source, \ 15034 .verify_crc_source = intel_crtc_verify_crc_source, \ 15035 .get_crc_sources = intel_crtc_get_crc_sources 15036 15037 static const struct drm_crtc_funcs bdw_crtc_funcs = { 15038 INTEL_CRTC_FUNCS, 15039 15040 .get_vblank_counter = g4x_get_vblank_counter, 15041 .enable_vblank = bdw_enable_vblank, 15042 .disable_vblank = bdw_disable_vblank, 15043 }; 15044 15045 static const struct drm_crtc_funcs ilk_crtc_funcs = { 15046 INTEL_CRTC_FUNCS, 15047 15048 .get_vblank_counter = g4x_get_vblank_counter, 15049 .enable_vblank = ilk_enable_vblank, 15050 .disable_vblank = ilk_disable_vblank, 15051 }; 15052 15053 static const struct drm_crtc_funcs g4x_crtc_funcs = { 15054 INTEL_CRTC_FUNCS, 15055 15056 .get_vblank_counter = g4x_get_vblank_counter, 15057 .enable_vblank = i965_enable_vblank, 15058 .disable_vblank = i965_disable_vblank, 15059 }; 15060 15061 static const struct drm_crtc_funcs i965_crtc_funcs = { 15062 INTEL_CRTC_FUNCS, 15063 15064 .get_vblank_counter = i915_get_vblank_counter, 15065 .enable_vblank = i965_enable_vblank, 15066 .disable_vblank = i965_disable_vblank, 15067 }; 15068 15069 static const struct drm_crtc_funcs i945gm_crtc_funcs = { 15070 INTEL_CRTC_FUNCS, 15071 15072 .get_vblank_counter = i915_get_vblank_counter, 15073 .enable_vblank = i945gm_enable_vblank, 15074 .disable_vblank = 
i945gm_disable_vblank, 15075 }; 15076 15077 static const struct drm_crtc_funcs i915_crtc_funcs = { 15078 INTEL_CRTC_FUNCS, 15079 15080 .get_vblank_counter = i915_get_vblank_counter, 15081 .enable_vblank = i8xx_enable_vblank, 15082 .disable_vblank = i8xx_disable_vblank, 15083 }; 15084 15085 static const struct drm_crtc_funcs i8xx_crtc_funcs = { 15086 INTEL_CRTC_FUNCS, 15087 15088 /* no hw vblank counter */ 15089 .enable_vblank = i8xx_enable_vblank, 15090 .disable_vblank = i8xx_disable_vblank, 15091 }; 15092 15093 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) 15094 { 15095 const struct drm_crtc_funcs *funcs; 15096 struct intel_crtc *intel_crtc; 15097 struct intel_crtc_state *crtc_state = NULL; 15098 struct intel_plane *primary = NULL; 15099 struct intel_plane *cursor = NULL; 15100 int sprite, ret; 15101 15102 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); 15103 if (!intel_crtc) 15104 return -ENOMEM; 15105 15106 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 15107 if (!crtc_state) { 15108 ret = -ENOMEM; 15109 goto fail; 15110 } 15111 __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base); 15112 intel_crtc->config = crtc_state; 15113 15114 primary = intel_primary_plane_create(dev_priv, pipe); 15115 if (IS_ERR(primary)) { 15116 ret = PTR_ERR(primary); 15117 goto fail; 15118 } 15119 intel_crtc->plane_ids_mask |= BIT(primary->id); 15120 15121 for_each_sprite(dev_priv, pipe, sprite) { 15122 struct intel_plane *plane; 15123 15124 plane = intel_sprite_plane_create(dev_priv, pipe, sprite); 15125 if (IS_ERR(plane)) { 15126 ret = PTR_ERR(plane); 15127 goto fail; 15128 } 15129 intel_crtc->plane_ids_mask |= BIT(plane->id); 15130 } 15131 15132 cursor = intel_cursor_plane_create(dev_priv, pipe); 15133 if (IS_ERR(cursor)) { 15134 ret = PTR_ERR(cursor); 15135 goto fail; 15136 } 15137 intel_crtc->plane_ids_mask |= BIT(cursor->id); 15138 15139 if (HAS_GMCH(dev_priv)) { 15140 if (IS_CHERRYVIEW(dev_priv) || 15141 IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv)) 15142 funcs = &g4x_crtc_funcs; 15143 else if (IS_GEN(dev_priv, 4)) 15144 funcs = &i965_crtc_funcs; 15145 else if (IS_I945GM(dev_priv)) 15146 funcs = &i945gm_crtc_funcs; 15147 else if (IS_GEN(dev_priv, 3)) 15148 funcs = &i915_crtc_funcs; 15149 else 15150 funcs = &i8xx_crtc_funcs; 15151 } else { 15152 if (INTEL_GEN(dev_priv) >= 8) 15153 funcs = &bdw_crtc_funcs; 15154 else 15155 funcs = &ilk_crtc_funcs; 15156 } 15157 15158 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, 15159 &primary->base, &cursor->base, 15160 funcs, "pipe %c", pipe_name(pipe)); 15161 if (ret) 15162 goto fail; 15163 15164 intel_crtc->pipe = pipe; 15165 15166 /* initialize shared scalers */ 15167 intel_crtc_init_scalers(intel_crtc, crtc_state); 15168 15169 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) || 15170 dev_priv->pipe_to_crtc_mapping[pipe] != NULL); 15171 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc; 15172 15173 if (INTEL_GEN(dev_priv) < 9) { 15174 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane; 15175 15176 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 15177 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL); 15178 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc; 15179 } 15180 15181 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 15182 15183 intel_color_init(intel_crtc); 15184 15185 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 15186 15187 return 0; 15188 15189 fail: 15190 /* 15191 * drm_mode_config_cleanup() will free up 
any 15192 * crtcs/planes already initialized. 15193 */ 15194 kfree(crtc_state); 15195 kfree(intel_crtc); 15196 15197 return ret; 15198 } 15199 15200 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 15201 struct drm_file *file) 15202 { 15203 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 15204 struct drm_crtc *drmmode_crtc; 15205 struct intel_crtc *crtc; 15206 15207 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); 15208 if (!drmmode_crtc) 15209 return -ENOENT; 15210 15211 crtc = to_intel_crtc(drmmode_crtc); 15212 pipe_from_crtc_id->pipe = crtc->pipe; 15213 15214 return 0; 15215 } 15216 15217 static int intel_encoder_clones(struct intel_encoder *encoder) 15218 { 15219 struct drm_device *dev = encoder->base.dev; 15220 struct intel_encoder *source_encoder; 15221 int index_mask = 0; 15222 int entry = 0; 15223 15224 for_each_intel_encoder(dev, source_encoder) { 15225 if (encoders_cloneable(encoder, source_encoder)) 15226 index_mask |= (1 << entry); 15227 15228 entry++; 15229 } 15230 15231 return index_mask; 15232 } 15233 15234 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 15235 { 15236 if (!IS_MOBILE(dev_priv)) 15237 return false; 15238 15239 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 15240 return false; 15241 15242 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 15243 return false; 15244 15245 return true; 15246 } 15247 15248 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 15249 { 15250 if (INTEL_GEN(dev_priv) >= 9) 15251 return false; 15252 15253 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 15254 return false; 15255 15256 if (HAS_PCH_LPT_H(dev_priv) && 15257 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 15258 return false; 15259 15260 /* DDI E can't be used if DDI A requires 4 lanes */ 15261 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 15262 return false; 15263 15264 if (!dev_priv->vbt.int_crt_support) 15265 return false; 15266 15267 return true; 15268 } 15269 15270 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) 15271 { 15272 int pps_num; 15273 int pps_idx; 15274 15275 if (HAS_DDI(dev_priv)) 15276 return; 15277 /* 15278 * This w/a is needed at least on CPT/PPT, but to be sure apply it 15279 * everywhere where registers can be write protected. 
15280 */ 15281 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15282 pps_num = 2; 15283 else 15284 pps_num = 1; 15285 15286 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { 15287 u32 val = I915_READ(PP_CONTROL(pps_idx)); 15288 15289 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; 15290 I915_WRITE(PP_CONTROL(pps_idx), val); 15291 } 15292 } 15293 15294 static void intel_pps_init(struct drm_i915_private *dev_priv) 15295 { 15296 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv)) 15297 dev_priv->pps_mmio_base = PCH_PPS_BASE; 15298 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15299 dev_priv->pps_mmio_base = VLV_PPS_BASE; 15300 else 15301 dev_priv->pps_mmio_base = PPS_BASE; 15302 15303 intel_pps_unlock_regs_wa(dev_priv); 15304 } 15305 15306 static void intel_setup_outputs(struct drm_i915_private *dev_priv) 15307 { 15308 struct intel_encoder *encoder; 15309 bool dpd_is_edp = false; 15310 15311 intel_pps_init(dev_priv); 15312 15313 if (!HAS_DISPLAY(dev_priv)) 15314 return; 15315 15316 if (INTEL_GEN(dev_priv) >= 12) { 15317 /* TODO: initialize TC ports as well */ 15318 intel_ddi_init(dev_priv, PORT_A); 15319 intel_ddi_init(dev_priv, PORT_B); 15320 icl_dsi_init(dev_priv); 15321 } else if (IS_ELKHARTLAKE(dev_priv)) { 15322 intel_ddi_init(dev_priv, PORT_A); 15323 intel_ddi_init(dev_priv, PORT_B); 15324 intel_ddi_init(dev_priv, PORT_C); 15325 intel_ddi_init(dev_priv, PORT_D); 15326 icl_dsi_init(dev_priv); 15327 } else if (IS_GEN(dev_priv, 11)) { 15328 intel_ddi_init(dev_priv, PORT_A); 15329 intel_ddi_init(dev_priv, PORT_B); 15330 intel_ddi_init(dev_priv, PORT_C); 15331 intel_ddi_init(dev_priv, PORT_D); 15332 intel_ddi_init(dev_priv, PORT_E); 15333 /* 15334 * On some ICL SKUs port F is not present. No strap bits for 15335 * this, so rely on VBT. 15336 * Work around broken VBTs on SKUs known to have no port F. 15337 */ 15338 if (IS_ICL_WITH_PORT_F(dev_priv) && 15339 intel_bios_is_port_present(dev_priv, PORT_F)) 15340 intel_ddi_init(dev_priv, PORT_F); 15341 15342 icl_dsi_init(dev_priv); 15343 } else if (IS_GEN9_LP(dev_priv)) { 15344 /* 15345 * FIXME: Broxton doesn't support port detection via the 15346 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 15347 * detect the ports. 15348 */ 15349 intel_ddi_init(dev_priv, PORT_A); 15350 intel_ddi_init(dev_priv, PORT_B); 15351 intel_ddi_init(dev_priv, PORT_C); 15352 15353 vlv_dsi_init(dev_priv); 15354 } else if (HAS_DDI(dev_priv)) { 15355 int found; 15356 15357 if (intel_ddi_crt_present(dev_priv)) 15358 intel_crt_init(dev_priv); 15359 15360 /* 15361 * Haswell uses DDI functions to detect digital outputs. 15362 * On SKL pre-D0 the strap isn't connected, so we assume 15363 * it's there. 15364 */ 15365 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 15366 /* WaIgnoreDDIAStrap: skl */ 15367 if (found || IS_GEN9_BC(dev_priv)) 15368 intel_ddi_init(dev_priv, PORT_A); 15369 15370 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP 15371 * register */ 15372 found = I915_READ(SFUSE_STRAP); 15373 15374 if (found & SFUSE_STRAP_DDIB_DETECTED) 15375 intel_ddi_init(dev_priv, PORT_B); 15376 if (found & SFUSE_STRAP_DDIC_DETECTED) 15377 intel_ddi_init(dev_priv, PORT_C); 15378 if (found & SFUSE_STRAP_DDID_DETECTED) 15379 intel_ddi_init(dev_priv, PORT_D); 15380 if (found & SFUSE_STRAP_DDIF_DETECTED) 15381 intel_ddi_init(dev_priv, PORT_F); 15382 /* 15383 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 
15384 */ 15385 if (IS_GEN9_BC(dev_priv) && 15386 intel_bios_is_port_present(dev_priv, PORT_E)) 15387 intel_ddi_init(dev_priv, PORT_E); 15388 15389 } else if (HAS_PCH_SPLIT(dev_priv)) { 15390 int found; 15391 15392 /* 15393 * intel_edp_init_connector() depends on this completing first, 15394 * to prevent the registration of both eDP and LVDS and the 15395 * incorrect sharing of the PPS. 15396 */ 15397 intel_lvds_init(dev_priv); 15398 intel_crt_init(dev_priv); 15399 15400 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); 15401 15402 if (ilk_has_edp_a(dev_priv)) 15403 intel_dp_init(dev_priv, DP_A, PORT_A); 15404 15405 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 15406 /* PCH SDVOB multiplexes with HDMIB */ 15407 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 15408 if (!found) 15409 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 15410 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 15411 intel_dp_init(dev_priv, PCH_DP_B, PORT_B); 15412 } 15413 15414 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 15415 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 15416 15417 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 15418 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 15419 15420 if (I915_READ(PCH_DP_C) & DP_DETECTED) 15421 intel_dp_init(dev_priv, PCH_DP_C, PORT_C); 15422 15423 if (I915_READ(PCH_DP_D) & DP_DETECTED) 15424 intel_dp_init(dev_priv, PCH_DP_D, PORT_D); 15425 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15426 bool has_edp, has_port; 15427 15428 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support) 15429 intel_crt_init(dev_priv); 15430 15431 /* 15432 * The DP_DETECTED bit is the latched state of the DDC 15433 * SDA pin at boot. However since eDP doesn't require DDC 15434 * (no way to plug in a DP->HDMI dongle) the DDC pins for 15435 * eDP ports may have been muxed to an alternate function. 15436 * Thus we can't rely on the DP_DETECTED bit alone to detect 15437 * eDP ports. Consult the VBT as well as DP_DETECTED to 15438 * detect eDP ports. 15439 * 15440 * Sadly the straps seem to be missing sometimes even for HDMI 15441 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap 15442 * and VBT for the presence of the port. Additionally we can't 15443 * trust the port type the VBT declares as we've seen at least 15444 * HDMI ports that the VBT claims are DP or eDP.
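	 *
	 * The probe rule that the code below implements is roughly:
	 *
	 *	DP:   strap set || VBT says the port is present
	 *	HDMI: (strap set || VBT says present) && the port did not
	 *	      end up registered as eDP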
15445 */ 15446 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 15447 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 15448 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) 15449 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); 15450 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 15451 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 15452 15453 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 15454 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 15455 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) 15456 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); 15457 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 15458 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 15459 15460 if (IS_CHERRYVIEW(dev_priv)) { 15461 /* 15462 * eDP not supported on port D, 15463 * so no need to worry about it 15464 */ 15465 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 15466 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) 15467 intel_dp_init(dev_priv, CHV_DP_D, PORT_D); 15468 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) 15469 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 15470 } 15471 15472 vlv_dsi_init(dev_priv); 15473 } else if (IS_PINEVIEW(dev_priv)) { 15474 intel_lvds_init(dev_priv); 15475 intel_crt_init(dev_priv); 15476 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { 15477 bool found = false; 15478 15479 if (IS_MOBILE(dev_priv)) 15480 intel_lvds_init(dev_priv); 15481 15482 intel_crt_init(dev_priv); 15483 15484 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 15485 DRM_DEBUG_KMS("probing SDVOB\n"); 15486 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 15487 if (!found && IS_G4X(dev_priv)) { 15488 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 15489 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 15490 } 15491 15492 if (!found && IS_G4X(dev_priv)) 15493 intel_dp_init(dev_priv, DP_B, PORT_B); 15494 } 15495 15496 /* Before G4X SDVOC doesn't have its own detect register */ 15497 15498 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 15499 DRM_DEBUG_KMS("probing SDVOC\n"); 15500 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 15501 } 15502 15503 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 15504 15505 if (IS_G4X(dev_priv)) { 15506 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 15507 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 15508 } 15509 if (IS_G4X(dev_priv)) 15510 intel_dp_init(dev_priv, DP_C, PORT_C); 15511 } 15512 15513 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) 15514 intel_dp_init(dev_priv, DP_D, PORT_D); 15515 15516 if (SUPPORTS_TV(dev_priv)) 15517 intel_tv_init(dev_priv); 15518 } else if (IS_GEN(dev_priv, 2)) { 15519 if (IS_I85X(dev_priv)) 15520 intel_lvds_init(dev_priv); 15521 15522 intel_crt_init(dev_priv); 15523 intel_dvo_init(dev_priv); 15524 } 15525 15526 intel_psr_init(dev_priv); 15527 15528 for_each_intel_encoder(&dev_priv->drm, encoder) { 15529 encoder->base.possible_crtcs = encoder->crtc_mask; 15530 encoder->base.possible_clones = 15531 intel_encoder_clones(encoder); 15532 } 15533 15534 intel_init_pch_refclk(dev_priv); 15535 15536 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 15537 } 15538 15539 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 15540 { 15541 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 15542 15543 drm_framebuffer_cleanup(fb); 15544 intel_frontbuffer_put(intel_fb->frontbuffer); 15545 15546 kfree(intel_fb); 15547 } 15548 15549 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 15550 struct drm_file 
*file, 15551 unsigned int *handle) 15552 { 15553 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15554 15555 if (obj->userptr.mm) { 15556 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); 15557 return -EINVAL; 15558 } 15559 15560 return drm_gem_handle_create(file, &obj->base, handle); 15561 } 15562 15563 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, 15564 struct drm_file *file, 15565 unsigned flags, unsigned color, 15566 struct drm_clip_rect *clips, 15567 unsigned num_clips) 15568 { 15569 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15570 15571 i915_gem_object_flush_if_display(obj); 15572 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); 15573 15574 return 0; 15575 } 15576 15577 static const struct drm_framebuffer_funcs intel_fb_funcs = { 15578 .destroy = intel_user_framebuffer_destroy, 15579 .create_handle = intel_user_framebuffer_create_handle, 15580 .dirty = intel_user_framebuffer_dirty, 15581 }; 15582 15583 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, 15584 struct drm_i915_gem_object *obj, 15585 struct drm_mode_fb_cmd2 *mode_cmd) 15586 { 15587 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 15588 struct drm_framebuffer *fb = &intel_fb->base; 15589 u32 max_stride; 15590 unsigned int tiling, stride; 15591 int ret = -EINVAL; 15592 int i; 15593 15594 intel_fb->frontbuffer = intel_frontbuffer_get(obj); 15595 if (!intel_fb->frontbuffer) 15596 return -ENOMEM; 15597 15598 i915_gem_object_lock(obj); 15599 tiling = i915_gem_object_get_tiling(obj); 15600 stride = i915_gem_object_get_stride(obj); 15601 i915_gem_object_unlock(obj); 15602 15603 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 15604 /* 15605 * If there's a fence, enforce that 15606 * the fb modifier and tiling mode match. 15607 */ 15608 if (tiling != I915_TILING_NONE && 15609 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 15610 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n"); 15611 goto err; 15612 } 15613 } else { 15614 if (tiling == I915_TILING_X) { 15615 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 15616 } else if (tiling == I915_TILING_Y) { 15617 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n"); 15618 goto err; 15619 } 15620 } 15621 15622 if (!drm_any_plane_has_format(&dev_priv->drm, 15623 mode_cmd->pixel_format, 15624 mode_cmd->modifier[0])) { 15625 struct drm_format_name_buf format_name; 15626 15627 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n", 15628 drm_get_format_name(mode_cmd->pixel_format, 15629 &format_name), 15630 mode_cmd->modifier[0]); 15631 goto err; 15632 } 15633 15634 /* 15635 * gen2/3 display engine uses the fence if present, 15636 * so the tiling mode must match the fb modifier exactly. 15637 */ 15638 if (INTEL_GEN(dev_priv) < 4 && 15639 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 15640 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n"); 15641 goto err; 15642 } 15643 15644 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format, 15645 mode_cmd->modifier[0]); 15646 if (mode_cmd->pitches[0] > max_stride) { 15647 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", 15648 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 15649 "tiled" : "linear", 15650 mode_cmd->pitches[0], max_stride); 15651 goto err; 15652 } 15653 15654 /* 15655 * If there's a fence, enforce that 15656 * the fb pitch and fence stride match. 
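 *
 * E.g. (made-up numbers): an X-tiled object whose fence stride was set
 * to 4096 cannot back an fb created with pitches[0] == 8192; on
 * platforms that scan out through the fence, the two strides have to
 * agree or the display engine would misread the layout.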
15657 */ 15658 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { 15659 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n", 15660 mode_cmd->pitches[0], stride); 15661 goto err; 15662 } 15663 15664 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 15665 if (mode_cmd->offsets[0] != 0) 15666 goto err; 15667 15668 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); 15669 15670 for (i = 0; i < fb->format->num_planes; i++) { 15671 u32 stride_alignment; 15672 15673 if (mode_cmd->handles[i] != mode_cmd->handles[0]) { 15674 DRM_DEBUG_KMS("bad plane %d handle\n", i); 15675 goto err; 15676 } 15677 15678 stride_alignment = intel_fb_stride_alignment(fb, i); 15679 15680 /* 15681 * Display WA #0531: skl,bxt,kbl,glk 15682 * 15683 * Render decompression and plane width > 3840 15684 * combined with horizontal panning requires the 15685 * plane stride to be a multiple of 4. We'll just 15686 * require the entire fb to accommodate that to avoid 15687 * potential runtime errors at plane configuration time. 15688 */ 15689 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 && 15690 is_ccs_modifier(fb->modifier)) 15691 stride_alignment *= 4; 15692 15693 if (fb->pitches[i] & (stride_alignment - 1)) { 15694 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n", 15695 i, fb->pitches[i], stride_alignment); 15696 goto err; 15697 } 15698 15699 fb->obj[i] = &obj->base; 15700 } 15701 15702 ret = intel_fill_fb_info(dev_priv, fb); 15703 if (ret) 15704 goto err; 15705 15706 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); 15707 if (ret) { 15708 DRM_ERROR("framebuffer init failed %d\n", ret); 15709 goto err; 15710 } 15711 15712 return 0; 15713 15714 err: 15715 intel_frontbuffer_put(intel_fb->frontbuffer); 15716 return ret; 15717 } 15718 15719 static struct drm_framebuffer * 15720 intel_user_framebuffer_create(struct drm_device *dev, 15721 struct drm_file *filp, 15722 const struct drm_mode_fb_cmd2 *user_mode_cmd) 15723 { 15724 struct drm_framebuffer *fb; 15725 struct drm_i915_gem_object *obj; 15726 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 15727 15728 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); 15729 if (!obj) 15730 return ERR_PTR(-ENOENT); 15731 15732 fb = intel_framebuffer_create(obj, &mode_cmd); 15733 i915_gem_object_put(obj); 15734 15735 return fb; 15736 } 15737 15738 static void intel_atomic_state_free(struct drm_atomic_state *state) 15739 { 15740 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 15741 15742 drm_atomic_state_default_release(state); 15743 15744 i915_sw_fence_fini(&intel_state->commit_ready); 15745 15746 kfree(state); 15747 } 15748 15749 static enum drm_mode_status 15750 intel_mode_valid(struct drm_device *dev, 15751 const struct drm_display_mode *mode) 15752 { 15753 struct drm_i915_private *dev_priv = to_i915(dev); 15754 int hdisplay_max, htotal_max; 15755 int vdisplay_max, vtotal_max; 15756 15757 /* 15758 * Can't reject DBLSCAN here because Xorg ddxen can add piles 15759 * of DBLSCAN modes to the output's mode list when they detect 15760 * the scaling mode property on the connector. And they don't 15761 * ask the kernel to validate those modes in any way until 15762 * modeset time at which point the client gets a protocol error. 15763 * So in order to not upset those clients we silently ignore the 15764 * DBLSCAN flag on such connectors. For other connectors we will 15765 * reject modes with the DBLSCAN flag in encoder->compute_config(). 
15766 * And we always reject DBLSCAN modes in connector->mode_valid() 15767 * as we never want such modes on the connector's mode list. 15768 */ 15769 15770 if (mode->vscan > 1) 15771 return MODE_NO_VSCAN; 15772 15773 if (mode->flags & DRM_MODE_FLAG_HSKEW) 15774 return MODE_H_ILLEGAL; 15775 15776 if (mode->flags & (DRM_MODE_FLAG_CSYNC | 15777 DRM_MODE_FLAG_NCSYNC | 15778 DRM_MODE_FLAG_PCSYNC)) 15779 return MODE_HSYNC; 15780 15781 if (mode->flags & (DRM_MODE_FLAG_BCAST | 15782 DRM_MODE_FLAG_PIXMUX | 15783 DRM_MODE_FLAG_CLKDIV2)) 15784 return MODE_BAD; 15785 15786 if (INTEL_GEN(dev_priv) >= 9 || 15787 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 15788 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ 15789 vdisplay_max = 4096; 15790 htotal_max = 8192; 15791 vtotal_max = 8192; 15792 } else if (INTEL_GEN(dev_priv) >= 3) { 15793 hdisplay_max = 4096; 15794 vdisplay_max = 4096; 15795 htotal_max = 8192; 15796 vtotal_max = 8192; 15797 } else { 15798 hdisplay_max = 2048; 15799 vdisplay_max = 2048; 15800 htotal_max = 4096; 15801 vtotal_max = 4096; 15802 } 15803 15804 if (mode->hdisplay > hdisplay_max || 15805 mode->hsync_start > htotal_max || 15806 mode->hsync_end > htotal_max || 15807 mode->htotal > htotal_max) 15808 return MODE_H_ILLEGAL; 15809 15810 if (mode->vdisplay > vdisplay_max || 15811 mode->vsync_start > vtotal_max || 15812 mode->vsync_end > vtotal_max || 15813 mode->vtotal > vtotal_max) 15814 return MODE_V_ILLEGAL; 15815 15816 return MODE_OK; 15817 } 15818 15819 static const struct drm_mode_config_funcs intel_mode_funcs = { 15820 .fb_create = intel_user_framebuffer_create, 15821 .get_format_info = intel_get_format_info, 15822 .output_poll_changed = intel_fbdev_output_poll_changed, 15823 .mode_valid = intel_mode_valid, 15824 .atomic_check = intel_atomic_check, 15825 .atomic_commit = intel_atomic_commit, 15826 .atomic_state_alloc = intel_atomic_state_alloc, 15827 .atomic_state_clear = intel_atomic_state_clear, 15828 .atomic_state_free = intel_atomic_state_free, 15829 }; 15830 15831 /** 15832 * intel_init_display_hooks - initialize the display modesetting hooks 15833 * @dev_priv: device private 15834 */ 15835 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 15836 { 15837 intel_init_cdclk_hooks(dev_priv); 15838 15839 if (INTEL_GEN(dev_priv) >= 9) { 15840 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 15841 dev_priv->display.get_initial_plane_config = 15842 skylake_get_initial_plane_config; 15843 dev_priv->display.crtc_compute_clock = 15844 haswell_crtc_compute_clock; 15845 dev_priv->display.crtc_enable = haswell_crtc_enable; 15846 dev_priv->display.crtc_disable = haswell_crtc_disable; 15847 } else if (HAS_DDI(dev_priv)) { 15848 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 15849 dev_priv->display.get_initial_plane_config = 15850 i9xx_get_initial_plane_config; 15851 dev_priv->display.crtc_compute_clock = 15852 haswell_crtc_compute_clock; 15853 dev_priv->display.crtc_enable = haswell_crtc_enable; 15854 dev_priv->display.crtc_disable = haswell_crtc_disable; 15855 } else if (HAS_PCH_SPLIT(dev_priv)) { 15856 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 15857 dev_priv->display.get_initial_plane_config = 15858 i9xx_get_initial_plane_config; 15859 dev_priv->display.crtc_compute_clock = 15860 ironlake_crtc_compute_clock; 15861 dev_priv->display.crtc_enable = ironlake_crtc_enable; 15862 dev_priv->display.crtc_disable = ironlake_crtc_disable; 15863 } else if (IS_CHERRYVIEW(dev_priv)) { 15864 dev_priv->display.get_pipe_config = 
i9xx_get_pipe_config; 15865 dev_priv->display.get_initial_plane_config = 15866 i9xx_get_initial_plane_config; 15867 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; 15868 dev_priv->display.crtc_enable = valleyview_crtc_enable; 15869 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15870 } else if (IS_VALLEYVIEW(dev_priv)) { 15871 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15872 dev_priv->display.get_initial_plane_config = 15873 i9xx_get_initial_plane_config; 15874 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; 15875 dev_priv->display.crtc_enable = valleyview_crtc_enable; 15876 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15877 } else if (IS_G4X(dev_priv)) { 15878 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15879 dev_priv->display.get_initial_plane_config = 15880 i9xx_get_initial_plane_config; 15881 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; 15882 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15883 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15884 } else if (IS_PINEVIEW(dev_priv)) { 15885 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15886 dev_priv->display.get_initial_plane_config = 15887 i9xx_get_initial_plane_config; 15888 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 15889 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15890 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15891 } else if (!IS_GEN(dev_priv, 2)) { 15892 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15893 dev_priv->display.get_initial_plane_config = 15894 i9xx_get_initial_plane_config; 15895 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 15896 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15897 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15898 } else { 15899 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15900 dev_priv->display.get_initial_plane_config = 15901 i9xx_get_initial_plane_config; 15902 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; 15903 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15904 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15905 } 15906 15907 if (IS_GEN(dev_priv, 5)) { 15908 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 15909 } else if (IS_GEN(dev_priv, 6)) { 15910 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 15911 } else if (IS_IVYBRIDGE(dev_priv)) { 15912 /* FIXME: detect B0+ stepping and use auto training */ 15913 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 15914 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 15915 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 15916 } 15917 15918 if (INTEL_GEN(dev_priv) >= 9) 15919 dev_priv->display.update_crtcs = skl_update_crtcs; 15920 else 15921 dev_priv->display.update_crtcs = intel_update_crtcs; 15922 } 15923 15924 static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv) 15925 { 15926 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15927 return VLV_VGACNTRL; 15928 else if (INTEL_GEN(dev_priv) >= 5) 15929 return CPU_VGACNTRL; 15930 else 15931 return VGACNTRL; 15932 } 15933 15934 /* Disable the VGA plane that we never use */ 15935 static void i915_disable_vga(struct drm_i915_private *dev_priv) 15936 { 15937 struct pci_dev *pdev = dev_priv->drm.pdev; 15938 u8 sr1; 15939 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); 15940 15941 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ 15942 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); 15943 
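	/*
	 * VGA sequencer register SR01 bit 5 is "screen off"; setting it
	 * blanks legacy VGA output before the VGA plane itself is disabled
	 * below (the udelay gives the blanking time to take effect).
	 */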
outb(SR01, VGA_SR_INDEX); 15944 sr1 = inb(VGA_SR_DATA); 15945 outb(sr1 | 1<<5, VGA_SR_DATA); 15946 vga_put(pdev, VGA_RSRC_LEGACY_IO); 15947 udelay(300); 15948 15949 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 15950 POSTING_READ(vga_reg); 15951 } 15952 15953 void intel_modeset_init_hw(struct drm_device *dev) 15954 { 15955 struct drm_i915_private *dev_priv = to_i915(dev); 15956 15957 intel_update_cdclk(dev_priv); 15958 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); 15959 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw; 15960 } 15961 15962 /* 15963 * Calculate what we think the watermarks should be for the state we've read 15964 * out of the hardware and then immediately program those watermarks so that 15965 * we ensure the hardware settings match our internal state. 15966 * 15967 * We can calculate what we think WM's should be by creating a duplicate of the 15968 * current state (which was constructed during hardware readout) and running it 15969 * through the atomic check code to calculate new watermark values in the 15970 * state object. 15971 */ 15972 static void sanitize_watermarks(struct drm_device *dev) 15973 { 15974 struct drm_i915_private *dev_priv = to_i915(dev); 15975 struct drm_atomic_state *state; 15976 struct intel_atomic_state *intel_state; 15977 struct intel_crtc *crtc; 15978 struct intel_crtc_state *crtc_state; 15979 struct drm_modeset_acquire_ctx ctx; 15980 int ret; 15981 int i; 15982 15983 /* Only supported on platforms that use atomic watermark design */ 15984 if (!dev_priv->display.optimize_watermarks) 15985 return; 15986 15987 /* 15988 * We need to hold connection_mutex before calling duplicate_state so 15989 * that the connector loop is protected. 15990 */ 15991 drm_modeset_acquire_init(&ctx, 0); 15992 retry: 15993 ret = drm_modeset_lock_all_ctx(dev, &ctx); 15994 if (ret == -EDEADLK) { 15995 drm_modeset_backoff(&ctx); 15996 goto retry; 15997 } else if (WARN_ON(ret)) { 15998 goto fail; 15999 } 16000 16001 state = drm_atomic_helper_duplicate_state(dev, &ctx); 16002 if (WARN_ON(IS_ERR(state))) 16003 goto fail; 16004 16005 intel_state = to_intel_atomic_state(state); 16006 16007 /* 16008 * Hardware readout is the only time we don't want to calculate 16009 * intermediate watermarks (since we don't trust the current 16010 * watermarks). 16011 */ 16012 if (!HAS_GMCH(dev_priv)) 16013 intel_state->skip_intermediate_wm = true; 16014 16015 ret = intel_atomic_check(dev, state); 16016 if (ret) { 16017 /* 16018 * If we fail here, it means that the hardware appears to be 16019 * programmed in a way that shouldn't be possible, given our 16020 * understanding of watermark requirements. This might mean a 16021 * mistake in the hardware readout code or a mistake in the 16022 * watermark calculations for a given platform. Raise a WARN 16023 * so that this is noticeable. 16024 * 16025 * If this actually happens, we'll have to just leave the 16026 * BIOS-programmed watermarks untouched and hope for the best. 
16027 */ 16028 WARN(true, "Could not determine valid watermarks for inherited state\n"); 16029 goto put_state; 16030 } 16031 16032 /* Write calculated watermark values back */ 16033 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { 16034 crtc_state->wm.need_postvbl_update = true; 16035 dev_priv->display.optimize_watermarks(intel_state, crtc_state); 16036 16037 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; 16038 } 16039 16040 put_state: 16041 drm_atomic_state_put(state); 16042 fail: 16043 drm_modeset_drop_locks(&ctx); 16044 drm_modeset_acquire_fini(&ctx); 16045 } 16046 16047 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) 16048 { 16049 if (IS_GEN(dev_priv, 5)) { 16050 u32 fdi_pll_clk = 16051 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; 16052 16053 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; 16054 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) { 16055 dev_priv->fdi_pll_freq = 270000; 16056 } else { 16057 return; 16058 } 16059 16060 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); 16061 } 16062 16063 static int intel_initial_commit(struct drm_device *dev) 16064 { 16065 struct drm_atomic_state *state = NULL; 16066 struct drm_modeset_acquire_ctx ctx; 16067 struct drm_crtc *crtc; 16068 struct drm_crtc_state *crtc_state; 16069 int ret = 0; 16070 16071 state = drm_atomic_state_alloc(dev); 16072 if (!state) 16073 return -ENOMEM; 16074 16075 drm_modeset_acquire_init(&ctx, 0); 16076 16077 retry: 16078 state->acquire_ctx = &ctx; 16079 16080 drm_for_each_crtc(crtc, dev) { 16081 crtc_state = drm_atomic_get_crtc_state(state, crtc); 16082 if (IS_ERR(crtc_state)) { 16083 ret = PTR_ERR(crtc_state); 16084 goto out; 16085 } 16086 16087 if (crtc_state->active) { 16088 ret = drm_atomic_add_affected_planes(state, crtc); 16089 if (ret) 16090 goto out; 16091 16092 /* 16093 * FIXME hack to force a LUT update to avoid the 16094 * plane update forcing the pipe gamma on without 16095 * having a proper LUT loaded. Remove once we 16096 * have readout for pipe gamma enable. 
16097 */ 16098 crtc_state->color_mgmt_changed = true; 16099 } 16100 } 16101 16102 ret = drm_atomic_commit(state); 16103 16104 out: 16105 if (ret == -EDEADLK) { 16106 drm_atomic_state_clear(state); 16107 drm_modeset_backoff(&ctx); 16108 goto retry; 16109 } 16110 16111 drm_atomic_state_put(state); 16112 16113 drm_modeset_drop_locks(&ctx); 16114 drm_modeset_acquire_fini(&ctx); 16115 16116 return ret; 16117 } 16118 16119 int intel_modeset_init(struct drm_device *dev) 16120 { 16121 struct drm_i915_private *dev_priv = to_i915(dev); 16122 enum pipe pipe; 16123 struct intel_crtc *crtc; 16124 int ret; 16125 16126 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); 16127 16128 drm_mode_config_init(dev); 16129 16130 ret = intel_bw_init(dev_priv); 16131 if (ret) 16132 return ret; 16133 16134 dev->mode_config.min_width = 0; 16135 dev->mode_config.min_height = 0; 16136 16137 dev->mode_config.preferred_depth = 24; 16138 dev->mode_config.prefer_shadow = 1; 16139 16140 dev->mode_config.allow_fb_modifiers = true; 16141 16142 dev->mode_config.funcs = &intel_mode_funcs; 16143 16144 init_llist_head(&dev_priv->atomic_helper.free_list); 16145 INIT_WORK(&dev_priv->atomic_helper.free_work, 16146 intel_atomic_helper_free_state_worker); 16147 16148 intel_init_quirks(dev_priv); 16149 16150 intel_fbc_init(dev_priv); 16151 16152 intel_init_pm(dev_priv); 16153 16154 /* 16155 * There may be no VBT; and if the BIOS enabled SSC we can 16156 * just keep using it to avoid unnecessary flicker. Whereas if the 16157 * BIOS isn't using it, don't assume it will work even if the VBT 16158 * indicates as much. 16159 */ 16160 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 16161 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & 16162 DREF_SSC1_ENABLE); 16163 16164 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 16165 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", 16166 bios_lvds_use_ssc ? "en" : "dis", 16167 dev_priv->vbt.lvds_use_ssc ? "en" : "dis"); 16168 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 16169 } 16170 } 16171 16172 /* 16173 * Maximum framebuffer dimensions, chosen to match 16174 * the maximum render engine surface size on gen4+. 16175 */ 16176 if (INTEL_GEN(dev_priv) >= 7) { 16177 dev->mode_config.max_width = 16384; 16178 dev->mode_config.max_height = 16384; 16179 } else if (INTEL_GEN(dev_priv) >= 4) { 16180 dev->mode_config.max_width = 8192; 16181 dev->mode_config.max_height = 8192; 16182 } else if (IS_GEN(dev_priv, 3)) { 16183 dev->mode_config.max_width = 4096; 16184 dev->mode_config.max_height = 4096; 16185 } else { 16186 dev->mode_config.max_width = 2048; 16187 dev->mode_config.max_height = 2048; 16188 } 16189 16190 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 16191 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512; 16192 dev->mode_config.cursor_height = 1023; 16193 } else if (IS_GEN(dev_priv, 2)) { 16194 dev->mode_config.cursor_width = 64; 16195 dev->mode_config.cursor_height = 64; 16196 } else { 16197 dev->mode_config.cursor_width = 256; 16198 dev->mode_config.cursor_height = 256; 16199 } 16200 16201 DRM_DEBUG_KMS("%d display pipe%s available.\n", 16202 INTEL_INFO(dev_priv)->num_pipes, 16203 INTEL_INFO(dev_priv)->num_pipes > 1 ? 
"s" : ""); 16204 16205 for_each_pipe(dev_priv, pipe) { 16206 ret = intel_crtc_init(dev_priv, pipe); 16207 if (ret) { 16208 drm_mode_config_cleanup(dev); 16209 return ret; 16210 } 16211 } 16212 16213 intel_shared_dpll_init(dev); 16214 intel_update_fdi_pll_freq(dev_priv); 16215 16216 intel_update_czclk(dev_priv); 16217 intel_modeset_init_hw(dev); 16218 16219 intel_hdcp_component_init(dev_priv); 16220 16221 if (dev_priv->max_cdclk_freq == 0) 16222 intel_update_max_cdclk(dev_priv); 16223 16224 /* Just disable it once at startup */ 16225 i915_disable_vga(dev_priv); 16226 intel_setup_outputs(dev_priv); 16227 16228 drm_modeset_lock_all(dev); 16229 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 16230 drm_modeset_unlock_all(dev); 16231 16232 for_each_intel_crtc(dev, crtc) { 16233 struct intel_initial_plane_config plane_config = {}; 16234 16235 if (!crtc->active) 16236 continue; 16237 16238 /* 16239 * Note that reserving the BIOS fb up front prevents us 16240 * from stuffing other stolen allocations like the ring 16241 * on top. This prevents some ugliness at boot time, and 16242 * can even allow for smooth boot transitions if the BIOS 16243 * fb is large enough for the active pipe configuration. 16244 */ 16245 dev_priv->display.get_initial_plane_config(crtc, 16246 &plane_config); 16247 16248 /* 16249 * If the fb is shared between multiple heads, we'll 16250 * just get the first one. 16251 */ 16252 intel_find_initial_plane_obj(crtc, &plane_config); 16253 } 16254 16255 /* 16256 * Make sure hardware watermarks really match the state we read out. 16257 * Note that we need to do this after reconstructing the BIOS fb's 16258 * since the watermark calculation done here will use pstate->fb. 16259 */ 16260 if (!HAS_GMCH(dev_priv)) 16261 sanitize_watermarks(dev); 16262 16263 /* 16264 * Force all active planes to recompute their states. So that on 16265 * mode_setcrtc after probe, all the intel_plane_state variables 16266 * are already calculated and there is no assert_plane warnings 16267 * during bootup. 

void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/*
	 * The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
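
/*
 * How the fixed dividers in i830_enable_pipe() get to ~25175 kHz,
 * assuming the usual i9xx conventions (m = 5 * (m1 + 2) + (m2 + 2),
 * vco = refclk * m / (n + 2), dot = vco / (p1 * p2)):
 *
 *	m   = 5 * (18 + 2) + (7 + 2) = 109
 *	vco = 48000 kHz * 109 / (2 + 2) = 1308000 kHz
 *	dot = 1308000 kHz / (13 * 4) ~= 25154 kHz
 *
 * which matches the WARN_ON() above and is close enough to the
 * 25175 kHz pixel clock of the standard 640x480@60Hz mode
 * (htotal 800, vtotal 525) that gets programmed.
 */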

void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		if (!plane->get_hw_state(plane, &pipe))
			continue;

		if (pipe == crtc->pipe)
			continue;

		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			      plane->base.base.id, plane->base.name);

		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}

static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}

/*
 * IBX and CPT have a PCH transcoder per pipe, whereas LPT-H only has
 * PCH transcoder A, hence the PIPE_A check.
 */
static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
			       enum pipe pch_transcoder)
{
	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
}

static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	if (crtc_state->base.active) {
		struct intel_plane *plane;

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/*
	 * Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders.
	 */
	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting, which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc, with the crtc for pipe A housing the
		 * underrun reporting state for PCH transcoder A, the crtc
		 * for pipe B housing it for PCH transcoder B, etc. LPT-H
		 * has only PCH transcoder A, and marking underrun reporting
		 * as disabled for the non-existing PCH transcoders B and C
		 * would prevent enabling the south error interrupt (see
		 * cpt_can_enable_serr_int()).
		 */
		if (has_pch_transcoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}

static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/*
	 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
	 * the hardware when a high res display is plugged in. The DPLL P
	 * divider is zero, and the pipe timings are bonkers. We'll
	 * try to disable everything in that case.
	 *
	 * FIXME would be nice to be able to sanitize this state
	 * without several WARNs, but for now let's take the easy
	 * road.
	 */
	return IS_GEN(dev_priv, 6) &&
		crtc_state->base.active &&
		crtc_state->shared_dpll &&
		crtc_state->port_clock == 0;
}

static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/*
	 * We need to check both for a crtc link (meaning that the encoder
	 * is active and trying to read from a pipe) and the pipe itself
	 * being active.
	 */
	bool has_active_crtc = crtc_state &&
		crtc_state->base.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/*
		 * Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again.
		 */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/*
		 * Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default.
		 */
		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}

void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
{
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev_priv);
	}
}

void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/*
	 * This function can be called either from intel_modeset_setup_hw_state
	 * or at a very early point in our resume sequence, where the power
	 * well structures are not yet restored. Since this function is at a
	 * very paranoid "someone might have enabled VGA while we were not
	 * looking" level, just check if the power well is enabled instead of
	 * trying to follow the "don't touch the power well if we don't need
	 * it" policy the rest of the driver uses.
	 */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_VGA);
	if (!wakeref)
		return;

	i915_redisable_vga_power_on(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		enum pipe pipe = PIPE_A; /* fallback if the plane is disabled */
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			      plane->base.base.id, plane->base.name,
			      enableddisabled(visible), pipe_name(pipe));
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}

static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		__drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	readout_plane_state(dev_priv);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							 &pll->state.hw_state);

		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
		    pll->info->id == DPLL_ID_EHL_DPLL4) {
			pll->wakeref = intel_display_power_get(dev_priv,
							       POWER_DOMAIN_DPLL_DC_OFF);
		}

		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					drm_connector_mask(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc_state);
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->base.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
		}

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
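
/*
 * A note on the data_rate estimate at the end of
 * intel_modeset_readout_hw_state(): the fb (and thus the per-plane
 * cpp) isn't known yet at readout time, so the code assumes a worst
 * case of 4 bytes per pixel at the full pipe pixel rate. With an
 * illustrative 148500 kHz pixel rate, a visible plane is booked as
 * 4 * 148500 = 594000 (same kHz-based units) for bandwidth accounting.
 */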

static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_crtc_state *crtc_state;

		if (!encoder->get_power_domains)
			continue;

		/*
		 * MST-primary and inactive encoders don't have a crtc state
		 * and neither of these requires any power domain references.
		 */
		if (!encoder->base.crtc)
			continue;

		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
		encoder->get_power_domains(encoder, crtc_state);
	}
}

static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}

static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
				       enum port port, i915_reg_t hdmi_reg)
{
	u32 val = I915_READ(hdmi_reg);

	if (val & SDVO_ENABLE ||
	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
		      port_name(port));

	val &= ~SDVO_PIPE_SEL_MASK;
	val |= SDVO_PIPE_SEL(PIPE_A);

	I915_WRITE(hdmi_reg, val);
}

static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
				     enum port port, i915_reg_t dp_reg)
{
	u32 val = I915_READ(dp_reg);

	if (val & DP_PORT_EN ||
	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
		      port_name(port));

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(PIPE_A);

	I915_WRITE(dp_reg, val);
}

static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB is multiplexed with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}

/*
 * Scan out the current hw modeset state and sanitize it to match the
 * current state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->base.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_fbc_init_pipe_state(dev_priv);
}

void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
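
/*
 * Note that dev_priv->modeset_restore_state is consumed exactly once
 * above: the pointer is cleared before the locks are taken, and the
 * state reference is dropped at the end regardless of whether
 * __intel_display_resume() succeeded.
 */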

static void intel_hpd_poll_fini(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

void intel_modeset_driver_remove(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	flush_workqueue(dev_priv->modeset_wq);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(dev_priv);

	drm_mode_config_cleanup(dev);

	intel_overlay_cleanup(dev_priv);

	intel_gmbus_teardown(dev_priv);

	destroy_workqueue(dev_priv->modeset_wq);

	intel_fbc_cleanup_cfb(dev_priv);
}

/*
 * Set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
	unsigned int reg = INTEL_GEN(dev_priv) >= 6 ?
		SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}
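
/*
 * The early-out in intel_modeset_vga_set_state() above compares the
 * normalized current state with the requested one:
 * !!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) is 1 when VGA decode is
 * currently disabled, and !state is 1 when the caller wants it
 * disabled, so equality means the bit is already in the requested
 * state and the PCI config write can be skipped.
 */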

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

struct intel_display_error_state {

	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};

struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, " Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, " SRC: %08x\n", error->pipe[i].source);
		err_printf(m, " STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->plane[i].control);
		err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, " SIZE: %08x\n", error->plane[i].size);
			err_printf(m, " POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, " SURF: %08x\n", error->plane[i].surface);
			err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, " POS: %08x\n", error->cursor[i].position);
		err_printf(m, " BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, " Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

#endif