/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
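
/*
 * Note: cursor planes only support linear buffers on these platforms,
 * hence the single modifier advertised below.
 */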
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

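	/*
	 * The CZ clock is sourced from the HPLL and read back through a
	 * CCK divider; VLV/CHV use it e.g. for watermark calculations.
	 */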
	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};
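
/*
 * Worked example of the divisor math (a sketch, not tied to any particular
 * mode): with the i9xx SDVO limits above and the typical 96 MHz i9xx refclk,
 * m1=12/m2=5 gives m = 5*(12+2) + (5+2) = 77, and n=1 gives
 * vco = 96000 * 77 / (1+2) = 2464000 kHz, within the 1.4-2.8 GHz VCO range;
 * p1=8/p2=5 then gives dot = 2464000 / 40 = 61600 kHz.
 */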
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits.
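 * These tables are used instead of the ones above when the LVDS SSC
 * reference clock runs at 100 MHz rather than the default 120 MHz.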
 */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
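	 * (The dot limits are given in fast clock units, i.e. 5x the
	 * pixel clock, hence the "* 5" factors below.)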
469 */ 470 .dot = { .min = 25000 * 5, .max = 540000 * 5}, 471 .vco = { .min = 4800000, .max = 6480000 }, 472 .n = { .min = 1, .max = 1 }, 473 .m1 = { .min = 2, .max = 2 }, 474 .m2 = { .min = 24 << 22, .max = 175 << 22 }, 475 .p1 = { .min = 2, .max = 4 }, 476 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 477 }; 478 479 static const struct intel_limit intel_limits_bxt = { 480 /* FIXME: find real dot limits */ 481 .dot = { .min = 0, .max = INT_MAX }, 482 .vco = { .min = 4800000, .max = 6700000 }, 483 .n = { .min = 1, .max = 1 }, 484 .m1 = { .min = 2, .max = 2 }, 485 /* FIXME: find real m2 limits */ 486 .m2 = { .min = 2 << 22, .max = 255 << 22 }, 487 .p1 = { .min = 2, .max = 4 }, 488 .p2 = { .p2_slow = 1, .p2_fast = 20 }, 489 }; 490 491 /* WA Display #0827: Gen9:all */ 492 static void 493 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) 494 { 495 if (enable) 496 I915_WRITE(CLKGATE_DIS_PSL(pipe), 497 I915_READ(CLKGATE_DIS_PSL(pipe)) | 498 DUPS1_GATING_DIS | DUPS2_GATING_DIS); 499 else 500 I915_WRITE(CLKGATE_DIS_PSL(pipe), 501 I915_READ(CLKGATE_DIS_PSL(pipe)) & 502 ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS)); 503 } 504 505 /* Wa_2006604312:icl */ 506 static void 507 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 508 bool enable) 509 { 510 if (enable) 511 I915_WRITE(CLKGATE_DIS_PSL(pipe), 512 I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS); 513 else 514 I915_WRITE(CLKGATE_DIS_PSL(pipe), 515 I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS); 516 } 517 518 static bool 519 needs_modeset(const struct intel_crtc_state *state) 520 { 521 return drm_atomic_crtc_needs_modeset(&state->base); 522 } 523 524 /* 525 * Platform specific helpers to calculate the port PLL loopback- (clock.m), 526 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast 527 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic. 528 * The helpers' return value is the rate of the clock that is fed to the 529 * display engine's pipe which can be the above fast dot clock rate or a 530 * divided-down version of it. 
531 */ 532 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 533 static int pnv_calc_dpll_params(int refclk, struct dpll *clock) 534 { 535 clock->m = clock->m2 + 2; 536 clock->p = clock->p1 * clock->p2; 537 if (WARN_ON(clock->n == 0 || clock->p == 0)) 538 return 0; 539 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 540 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 541 542 return clock->dot; 543 } 544 545 static u32 i9xx_dpll_compute_m(struct dpll *dpll) 546 { 547 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 548 } 549 550 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock) 551 { 552 clock->m = i9xx_dpll_compute_m(clock); 553 clock->p = clock->p1 * clock->p2; 554 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) 555 return 0; 556 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); 557 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 558 559 return clock->dot; 560 } 561 562 static int vlv_calc_dpll_params(int refclk, struct dpll *clock) 563 { 564 clock->m = clock->m1 * clock->m2; 565 clock->p = clock->p1 * clock->p2; 566 if (WARN_ON(clock->n == 0 || clock->p == 0)) 567 return 0; 568 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 569 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 570 571 return clock->dot / 5; 572 } 573 574 int chv_calc_dpll_params(int refclk, struct dpll *clock) 575 { 576 clock->m = clock->m1 * clock->m2; 577 clock->p = clock->p1 * clock->p2; 578 if (WARN_ON(clock->n == 0 || clock->p == 0)) 579 return 0; 580 clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), 581 clock->n << 22); 582 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 583 584 return clock->dot / 5; 585 } 586 587 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 588 589 /* 590 * Returns whether the given set of divisors are valid for a given refclk with 591 * the given connectors. 592 */ 593 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv, 594 const struct intel_limit *limit, 595 const struct dpll *clock) 596 { 597 if (clock->n < limit->n.min || limit->n.max < clock->n) 598 INTELPllInvalid("n out of range\n"); 599 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 600 INTELPllInvalid("p1 out of range\n"); 601 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 602 INTELPllInvalid("m2 out of range\n"); 603 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 604 INTELPllInvalid("m1 out of range\n"); 605 606 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) && 607 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv)) 608 if (clock->m1 <= clock->m2) 609 INTELPllInvalid("m1 <= m2\n"); 610 611 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 612 !IS_GEN9_LP(dev_priv)) { 613 if (clock->p < limit->p.min || limit->p.max < clock->p) 614 INTELPllInvalid("p out of range\n"); 615 if (clock->m < limit->m.min || limit->m.max < clock->m) 616 INTELPllInvalid("m out of range\n"); 617 } 618 619 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 620 INTELPllInvalid("vco out of range\n"); 621 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 622 * connector, etc., rather than just a single range. 
623 */ 624 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 625 INTELPllInvalid("dot out of range\n"); 626 627 return true; 628 } 629 630 static int 631 i9xx_select_p2_div(const struct intel_limit *limit, 632 const struct intel_crtc_state *crtc_state, 633 int target) 634 { 635 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 636 637 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 638 /* 639 * For LVDS just rely on its current settings for dual-channel. 640 * We haven't figured out how to reliably set up different 641 * single/dual channel state, if we even can. 642 */ 643 if (intel_is_dual_link_lvds(dev_priv)) 644 return limit->p2.p2_fast; 645 else 646 return limit->p2.p2_slow; 647 } else { 648 if (target < limit->p2.dot_limit) 649 return limit->p2.p2_slow; 650 else 651 return limit->p2.p2_fast; 652 } 653 } 654 655 /* 656 * Returns a set of divisors for the desired target clock with the given 657 * refclk, or FALSE. The returned values represent the clock equation: 658 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 659 * 660 * Target and reference clocks are specified in kHz. 661 * 662 * If match_clock is provided, then best_clock P divider must match the P 663 * divider from @match_clock used for LVDS downclocking. 664 */ 665 static bool 666 i9xx_find_best_dpll(const struct intel_limit *limit, 667 struct intel_crtc_state *crtc_state, 668 int target, int refclk, struct dpll *match_clock, 669 struct dpll *best_clock) 670 { 671 struct drm_device *dev = crtc_state->base.crtc->dev; 672 struct dpll clock; 673 int err = target; 674 675 memset(best_clock, 0, sizeof(*best_clock)); 676 677 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 678 679 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 680 clock.m1++) { 681 for (clock.m2 = limit->m2.min; 682 clock.m2 <= limit->m2.max; clock.m2++) { 683 if (clock.m2 >= clock.m1) 684 break; 685 for (clock.n = limit->n.min; 686 clock.n <= limit->n.max; clock.n++) { 687 for (clock.p1 = limit->p1.min; 688 clock.p1 <= limit->p1.max; clock.p1++) { 689 int this_err; 690 691 i9xx_calc_dpll_params(refclk, &clock); 692 if (!intel_PLL_is_valid(to_i915(dev), 693 limit, 694 &clock)) 695 continue; 696 if (match_clock && 697 clock.p != match_clock->p) 698 continue; 699 700 this_err = abs(clock.dot - target); 701 if (this_err < err) { 702 *best_clock = clock; 703 err = this_err; 704 } 705 } 706 } 707 } 708 } 709 710 return (err != target); 711 } 712 713 /* 714 * Returns a set of divisors for the desired target clock with the given 715 * refclk, or FALSE. The returned values represent the clock equation: 716 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 717 * 718 * Target and reference clocks are specified in kHz. 719 * 720 * If match_clock is provided, then best_clock P divider must match the P 721 * divider from @match_clock used for LVDS downclocking. 
722 */ 723 static bool 724 pnv_find_best_dpll(const struct intel_limit *limit, 725 struct intel_crtc_state *crtc_state, 726 int target, int refclk, struct dpll *match_clock, 727 struct dpll *best_clock) 728 { 729 struct drm_device *dev = crtc_state->base.crtc->dev; 730 struct dpll clock; 731 int err = target; 732 733 memset(best_clock, 0, sizeof(*best_clock)); 734 735 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 736 737 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 738 clock.m1++) { 739 for (clock.m2 = limit->m2.min; 740 clock.m2 <= limit->m2.max; clock.m2++) { 741 for (clock.n = limit->n.min; 742 clock.n <= limit->n.max; clock.n++) { 743 for (clock.p1 = limit->p1.min; 744 clock.p1 <= limit->p1.max; clock.p1++) { 745 int this_err; 746 747 pnv_calc_dpll_params(refclk, &clock); 748 if (!intel_PLL_is_valid(to_i915(dev), 749 limit, 750 &clock)) 751 continue; 752 if (match_clock && 753 clock.p != match_clock->p) 754 continue; 755 756 this_err = abs(clock.dot - target); 757 if (this_err < err) { 758 *best_clock = clock; 759 err = this_err; 760 } 761 } 762 } 763 } 764 } 765 766 return (err != target); 767 } 768 769 /* 770 * Returns a set of divisors for the desired target clock with the given 771 * refclk, or FALSE. The returned values represent the clock equation: 772 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 773 * 774 * Target and reference clocks are specified in kHz. 775 * 776 * If match_clock is provided, then best_clock P divider must match the P 777 * divider from @match_clock used for LVDS downclocking. 778 */ 779 static bool 780 g4x_find_best_dpll(const struct intel_limit *limit, 781 struct intel_crtc_state *crtc_state, 782 int target, int refclk, struct dpll *match_clock, 783 struct dpll *best_clock) 784 { 785 struct drm_device *dev = crtc_state->base.crtc->dev; 786 struct dpll clock; 787 int max_n; 788 bool found = false; 789 /* approximately equals target * 0.00585 */ 790 int err_most = (target >> 8) + (target >> 9); 791 792 memset(best_clock, 0, sizeof(*best_clock)); 793 794 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 795 796 max_n = limit->n.max; 797 /* based on hardware requirement, prefer smaller n to precision */ 798 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 799 /* based on hardware requirement, prefere larger m1,m2 */ 800 for (clock.m1 = limit->m1.max; 801 clock.m1 >= limit->m1.min; clock.m1--) { 802 for (clock.m2 = limit->m2.max; 803 clock.m2 >= limit->m2.min; clock.m2--) { 804 for (clock.p1 = limit->p1.max; 805 clock.p1 >= limit->p1.min; clock.p1--) { 806 int this_err; 807 808 i9xx_calc_dpll_params(refclk, &clock); 809 if (!intel_PLL_is_valid(to_i915(dev), 810 limit, 811 &clock)) 812 continue; 813 814 this_err = abs(clock.dot - target); 815 if (this_err < err_most) { 816 *best_clock = clock; 817 err_most = this_err; 818 max_n = clock.n; 819 found = true; 820 } 821 } 822 } 823 } 824 } 825 return found; 826 } 827 828 /* 829 * Check if the calculated PLL configuration is more optimal compared to the 830 * best configuration and error found so far. Return the calculated error. 831 */ 832 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, 833 const struct dpll *calculated_clock, 834 const struct dpll *best_clock, 835 unsigned int best_error_ppm, 836 unsigned int *error_ppm) 837 { 838 /* 839 * For CHV ignore the error and consider only the P value. 840 * Prefer a bigger P value based on HW requirements. 
841 */ 842 if (IS_CHERRYVIEW(to_i915(dev))) { 843 *error_ppm = 0; 844 845 return calculated_clock->p > best_clock->p; 846 } 847 848 if (WARN_ON_ONCE(!target_freq)) 849 return false; 850 851 *error_ppm = div_u64(1000000ULL * 852 abs(target_freq - calculated_clock->dot), 853 target_freq); 854 /* 855 * Prefer a better P value over a better (smaller) error if the error 856 * is small. Ensure this preference for future configurations too by 857 * setting the error to 0. 858 */ 859 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { 860 *error_ppm = 0; 861 862 return true; 863 } 864 865 return *error_ppm + 10 < best_error_ppm; 866 } 867 868 /* 869 * Returns a set of divisors for the desired target clock with the given 870 * refclk, or FALSE. The returned values represent the clock equation: 871 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 872 */ 873 static bool 874 vlv_find_best_dpll(const struct intel_limit *limit, 875 struct intel_crtc_state *crtc_state, 876 int target, int refclk, struct dpll *match_clock, 877 struct dpll *best_clock) 878 { 879 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 880 struct drm_device *dev = crtc->base.dev; 881 struct dpll clock; 882 unsigned int bestppm = 1000000; 883 /* min update 19.2 MHz */ 884 int max_n = min(limit->n.max, refclk / 19200); 885 bool found = false; 886 887 target *= 5; /* fast clock */ 888 889 memset(best_clock, 0, sizeof(*best_clock)); 890 891 /* based on hardware requirement, prefer smaller n to precision */ 892 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 893 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 894 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; 895 clock.p2 -= clock.p2 > 10 ? 2 : 1) { 896 clock.p = clock.p1 * clock.p2; 897 /* based on hardware requirement, prefer bigger m1,m2 values */ 898 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 899 unsigned int ppm; 900 901 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 902 refclk * clock.m1); 903 904 vlv_calc_dpll_params(refclk, &clock); 905 906 if (!intel_PLL_is_valid(to_i915(dev), 907 limit, 908 &clock)) 909 continue; 910 911 if (!vlv_PLL_is_optimal(dev, target, 912 &clock, 913 best_clock, 914 bestppm, &ppm)) 915 continue; 916 917 *best_clock = clock; 918 bestppm = ppm; 919 found = true; 920 } 921 } 922 } 923 } 924 925 return found; 926 } 927 928 /* 929 * Returns a set of divisors for the desired target clock with the given 930 * refclk, or FALSE. The returned values represent the clock equation: 931 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 932 */ 933 static bool 934 chv_find_best_dpll(const struct intel_limit *limit, 935 struct intel_crtc_state *crtc_state, 936 int target, int refclk, struct dpll *match_clock, 937 struct dpll *best_clock) 938 { 939 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 940 struct drm_device *dev = crtc->base.dev; 941 unsigned int best_error_ppm; 942 struct dpll clock; 943 u64 m2; 944 int found = false; 945 946 memset(best_clock, 0, sizeof(*best_clock)); 947 best_error_ppm = 1000000; 948 949 /* 950 * Based on hardware doc, the n always set to 1, and m1 always 951 * set to 2. If requires to support 200Mhz refclk, we need to 952 * revisit this because n may not 1 anymore. 
953 */ 954 clock.n = 1, clock.m1 = 2; 955 target *= 5; /* fast clock */ 956 957 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 958 for (clock.p2 = limit->p2.p2_fast; 959 clock.p2 >= limit->p2.p2_slow; 960 clock.p2 -= clock.p2 > 10 ? 2 : 1) { 961 unsigned int error_ppm; 962 963 clock.p = clock.p1 * clock.p2; 964 965 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22, 966 refclk * clock.m1); 967 968 if (m2 > INT_MAX/clock.m1) 969 continue; 970 971 clock.m2 = m2; 972 973 chv_calc_dpll_params(refclk, &clock); 974 975 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock)) 976 continue; 977 978 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, 979 best_error_ppm, &error_ppm)) 980 continue; 981 982 *best_clock = clock; 983 best_error_ppm = error_ppm; 984 found = true; 985 } 986 } 987 988 return found; 989 } 990 991 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, 992 struct dpll *best_clock) 993 { 994 int refclk = 100000; 995 const struct intel_limit *limit = &intel_limits_bxt; 996 997 return chv_find_best_dpll(limit, crtc_state, 998 crtc_state->port_clock, refclk, 999 NULL, best_clock); 1000 } 1001 1002 bool intel_crtc_active(struct intel_crtc *crtc) 1003 { 1004 /* Be paranoid as we can arrive here with only partial 1005 * state retrieved from the hardware during setup. 1006 * 1007 * We can ditch the adjusted_mode.crtc_clock check as soon 1008 * as Haswell has gained clock readout/fastboot support. 1009 * 1010 * We can ditch the crtc->primary->state->fb check as soon as we can 1011 * properly reconstruct framebuffers. 1012 * 1013 * FIXME: The intel_crtc->active here should be switched to 1014 * crtc->state->active once we have proper CRTC states wired up 1015 * for atomic. 1016 */ 1017 return crtc->active && crtc->base.primary->state->fb && 1018 crtc->config->base.adjusted_mode.crtc_clock; 1019 } 1020 1021 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 1022 enum pipe pipe) 1023 { 1024 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1025 1026 return crtc->config->cpu_transcoder; 1027 } 1028 1029 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, 1030 enum pipe pipe) 1031 { 1032 i915_reg_t reg = PIPEDSL(pipe); 1033 u32 line1, line2; 1034 u32 line_mask; 1035 1036 if (IS_GEN(dev_priv, 2)) 1037 line_mask = DSL_LINEMASK_GEN2; 1038 else 1039 line_mask = DSL_LINEMASK_GEN3; 1040 1041 line1 = I915_READ(reg) & line_mask; 1042 msleep(5); 1043 line2 = I915_READ(reg) & line_mask; 1044 1045 return line1 != line2; 1046 } 1047 1048 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) 1049 { 1050 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1051 enum pipe pipe = crtc->pipe; 1052 1053 /* Wait for the display line to settle/start moving */ 1054 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) 1055 DRM_ERROR("pipe %c scanline %s wait timed out\n", 1056 pipe_name(pipe), onoff(state)); 1057 } 1058 1059 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc) 1060 { 1061 wait_for_pipe_scanline_moving(crtc, false); 1062 } 1063 1064 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) 1065 { 1066 wait_for_pipe_scanline_moving(crtc, true); 1067 } 1068 1069 static void 1070 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) 1071 { 1072 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 1073 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1074 
	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE),
			"FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void
assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void
assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB is multiplexed with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Re-enable the 10-bit clock to the display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
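		 * (On gen < 4 the multiplier bits live in the DPLL register
		 * itself, so rewriting the same value is presumably what
		 * latches them.)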
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable the 10-bit clock to the display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}

static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the
		 * timing override bit before enabling the PCH transcoder.
		 */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again.
		 */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (INTEL_GEN(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}

static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}

static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
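	 * (HAS_GMCH() covers the pre-ILK platforms plus VLV/CHV, which
	 * still use GMCH-style DPLLs.)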
1810 */ 1811 if (HAS_GMCH(dev_priv)) { 1812 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) 1813 assert_dsi_pll_enabled(dev_priv); 1814 else 1815 assert_pll_enabled(dev_priv, pipe); 1816 } else { 1817 if (new_crtc_state->has_pch_encoder) { 1818 /* if driving the PCH, we need FDI enabled */ 1819 assert_fdi_rx_pll_enabled(dev_priv, 1820 intel_crtc_pch_transcoder(crtc)); 1821 assert_fdi_tx_pll_enabled(dev_priv, 1822 (enum pipe) cpu_transcoder); 1823 } 1824 /* FIXME: assert CPU port conditions for SNB+ */ 1825 } 1826 1827 trace_intel_pipe_enable(crtc); 1828 1829 reg = PIPECONF(cpu_transcoder); 1830 val = I915_READ(reg); 1831 if (val & PIPECONF_ENABLE) { 1832 /* we keep both pipes enabled on 830 */ 1833 WARN_ON(!IS_I830(dev_priv)); 1834 return; 1835 } 1836 1837 I915_WRITE(reg, val | PIPECONF_ENABLE); 1838 POSTING_READ(reg); 1839 1840 /* 1841 * Until the pipe starts PIPEDSL reads will return a stale value, 1842 * which causes an apparent vblank timestamp jump when PIPEDSL 1843 * resets to its proper value. That also messes up the frame count 1844 * when it's derived from the timestamps. So let's wait for the 1845 * pipe to start properly before we call drm_crtc_vblank_on() 1846 */ 1847 if (intel_crtc_max_vblank_count(new_crtc_state) == 0) 1848 intel_wait_for_pipe_scanline_moving(crtc); 1849 } 1850 1851 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) 1852 { 1853 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 1854 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1855 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 1856 enum pipe pipe = crtc->pipe; 1857 i915_reg_t reg; 1858 u32 val; 1859 1860 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 1861 1862 /* 1863 * Make sure planes won't keep trying to pump pixels to us, 1864 * or we might hang the display. 1865 */ 1866 assert_planes_disabled(crtc); 1867 1868 trace_intel_pipe_disable(crtc); 1869 1870 reg = PIPECONF(cpu_transcoder); 1871 val = I915_READ(reg); 1872 if ((val & PIPECONF_ENABLE) == 0) 1873 return; 1874 1875 /* 1876 * Double wide has implications for planes 1877 * so best keep it disabled when not needed. 1878 */ 1879 if (old_crtc_state->double_wide) 1880 val &= ~PIPECONF_DOUBLE_WIDE; 1881 1882 /* Don't disable pipe or pipe PLLs if needed */ 1883 if (!IS_I830(dev_priv)) 1884 val &= ~PIPECONF_ENABLE; 1885 1886 I915_WRITE(reg, val); 1887 if ((val & PIPECONF_ENABLE) == 0) 1888 intel_wait_for_pipe_off(old_crtc_state); 1889 } 1890 1891 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 1892 { 1893 return IS_GEN(dev_priv, 2) ? 
2048 : 4096; 1894 } 1895 1896 static unsigned int 1897 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) 1898 { 1899 struct drm_i915_private *dev_priv = to_i915(fb->dev); 1900 unsigned int cpp = fb->format->cpp[color_plane]; 1901 1902 switch (fb->modifier) { 1903 case DRM_FORMAT_MOD_LINEAR: 1904 return intel_tile_size(dev_priv); 1905 case I915_FORMAT_MOD_X_TILED: 1906 if (IS_GEN(dev_priv, 2)) 1907 return 128; 1908 else 1909 return 512; 1910 case I915_FORMAT_MOD_Y_TILED_CCS: 1911 if (color_plane == 1) 1912 return 128; 1913 /* fall through */ 1914 case I915_FORMAT_MOD_Y_TILED: 1915 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv)) 1916 return 128; 1917 else 1918 return 512; 1919 case I915_FORMAT_MOD_Yf_TILED_CCS: 1920 if (color_plane == 1) 1921 return 128; 1922 /* fall through */ 1923 case I915_FORMAT_MOD_Yf_TILED: 1924 switch (cpp) { 1925 case 1: 1926 return 64; 1927 case 2: 1928 case 4: 1929 return 128; 1930 case 8: 1931 case 16: 1932 return 256; 1933 default: 1934 MISSING_CASE(cpp); 1935 return cpp; 1936 } 1937 break; 1938 default: 1939 MISSING_CASE(fb->modifier); 1940 return cpp; 1941 } 1942 } 1943 1944 static unsigned int 1945 intel_tile_height(const struct drm_framebuffer *fb, int color_plane) 1946 { 1947 return intel_tile_size(to_i915(fb->dev)) / 1948 intel_tile_width_bytes(fb, color_plane); 1949 } 1950 1951 /* Return the tile dimensions in pixel units */ 1952 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, 1953 unsigned int *tile_width, 1954 unsigned int *tile_height) 1955 { 1956 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane); 1957 unsigned int cpp = fb->format->cpp[color_plane]; 1958 1959 *tile_width = tile_width_bytes / cpp; 1960 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes; 1961 } 1962 1963 unsigned int 1964 intel_fb_align_height(const struct drm_framebuffer *fb, 1965 int color_plane, unsigned int height) 1966 { 1967 unsigned int tile_height = intel_tile_height(fb, color_plane); 1968 1969 return ALIGN(height, tile_height); 1970 } 1971 1972 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 1973 { 1974 unsigned int size = 0; 1975 int i; 1976 1977 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 1978 size += rot_info->plane[i].width * rot_info->plane[i].height; 1979 1980 return size; 1981 } 1982 1983 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info) 1984 { 1985 unsigned int size = 0; 1986 int i; 1987 1988 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) 1989 size += rem_info->plane[i].width * rem_info->plane[i].height; 1990 1991 return size; 1992 } 1993 1994 static void 1995 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, 1996 const struct drm_framebuffer *fb, 1997 unsigned int rotation) 1998 { 1999 view->type = I915_GGTT_VIEW_NORMAL; 2000 if (drm_rotation_90_or_270(rotation)) { 2001 view->type = I915_GGTT_VIEW_ROTATED; 2002 view->rotated = to_intel_framebuffer(fb)->rot_info; 2003 } 2004 } 2005 2006 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv) 2007 { 2008 if (IS_I830(dev_priv)) 2009 return 16 * 1024; 2010 else if (IS_I85X(dev_priv)) 2011 return 256; 2012 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 2013 return 32; 2014 else 2015 return 4 * 1024; 2016 } 2017 2018 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2019 { 2020 if (INTEL_GEN(dev_priv) >= 9) 2021 return 256 * 1024; 2022 else if (IS_I965G(dev_priv) || 
IS_I965GM(dev_priv) || 2023 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2024 return 128 * 1024; 2025 else if (INTEL_GEN(dev_priv) >= 4) 2026 return 4 * 1024; 2027 else 2028 return 0; 2029 } 2030 2031 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, 2032 int color_plane) 2033 { 2034 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2035 2036 /* AUX_DIST needs only 4K alignment */ 2037 if (color_plane == 1) 2038 return 4096; 2039 2040 switch (fb->modifier) { 2041 case DRM_FORMAT_MOD_LINEAR: 2042 return intel_linear_alignment(dev_priv); 2043 case I915_FORMAT_MOD_X_TILED: 2044 if (INTEL_GEN(dev_priv) >= 9) 2045 return 256 * 1024; 2046 return 0; 2047 case I915_FORMAT_MOD_Y_TILED_CCS: 2048 case I915_FORMAT_MOD_Yf_TILED_CCS: 2049 case I915_FORMAT_MOD_Y_TILED: 2050 case I915_FORMAT_MOD_Yf_TILED: 2051 return 1 * 1024 * 1024; 2052 default: 2053 MISSING_CASE(fb->modifier); 2054 return 0; 2055 } 2056 } 2057 2058 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) 2059 { 2060 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2061 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2062 2063 return INTEL_GEN(dev_priv) < 4 || 2064 (plane->has_fbc && 2065 plane_state->view.type == I915_GGTT_VIEW_NORMAL); 2066 } 2067 2068 struct i915_vma * 2069 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 2070 const struct i915_ggtt_view *view, 2071 bool uses_fence, 2072 unsigned long *out_flags) 2073 { 2074 struct drm_device *dev = fb->dev; 2075 struct drm_i915_private *dev_priv = to_i915(dev); 2076 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2077 intel_wakeref_t wakeref; 2078 struct i915_vma *vma; 2079 unsigned int pinctl; 2080 u32 alignment; 2081 2082 if (WARN_ON(!i915_gem_object_is_framebuffer(obj))) 2083 return ERR_PTR(-EINVAL); 2084 2085 alignment = intel_surf_alignment(fb, 0); 2086 2087 /* Note that the w/a also requires 64 PTE of padding following the 2088 * bo. We currently fill all unused PTE with the shadow page and so 2089 * we should always have valid PTE following the scanout preventing 2090 * the VT-d warning. 2091 */ 2092 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) 2093 alignment = 256 * 1024; 2094 2095 /* 2096 * Global gtt pte registers are special registers which actually forward 2097 * writes to a chunk of system memory, which means that there is no risk 2098 * that the register values disappear as soon as we call 2099 * intel_runtime_pm_put(), so it is correct to wrap only the 2100 * pin/unpin/fence and not more. 2101 */ 2102 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2103 i915_gem_object_lock(obj); 2104 2105 atomic_inc(&dev_priv->gpu_error.pending_fb_pin); 2106 2107 pinctl = 0; 2108 2109 /* Valleyview is definitely limited to scanning out the first 2110 * 512MiB. Let's presume this behaviour was inherited from the 2111 * g4x display engine and that all earlier gen are similarly 2112 * limited. Testing suggests that it is a little more 2113 * complicated than this. For example, Cherryview appears quite 2114 * happy to scanout from anywhere within its global aperture. 2115 */ 2116 if (HAS_GMCH(dev_priv)) 2117 pinctl |= PIN_MAPPABLE; 2118 2119 vma = i915_gem_object_pin_to_display_plane(obj, 2120 alignment, view, pinctl); 2121 if (IS_ERR(vma)) 2122 goto err; 2123 2124 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { 2125 int ret; 2126 2127 /* Install a fence for tiled scan-out.
Pre-i965 always needs a 2128 * fence, whereas 965+ only requires a fence if using 2129 * framebuffer compression. For simplicity, we always, when 2130 * possible, install a fence as the cost is not that onerous. 2131 * 2132 * If we fail to fence the tiled scanout, then either the 2133 * modeset will reject the change (which is highly unlikely as 2134 * the affected systems, all but one, do not have unmappable 2135 * space) or we will not be able to enable full powersaving 2136 * techniques (also likely not to apply due to various limits 2137 * FBC and the like impose on the size of the buffer, which 2138 * presumably we violated anyway with this unmappable buffer). 2139 * Anyway, it is presumably better to stumble onwards with 2140 * something and try to run the system in a "less than optimal" 2141 * mode that matches the user configuration. 2142 */ 2143 ret = i915_vma_pin_fence(vma); 2144 if (ret != 0 && INTEL_GEN(dev_priv) < 4) { 2145 i915_gem_object_unpin_from_display_plane(vma); 2146 vma = ERR_PTR(ret); 2147 goto err; 2148 } 2149 2150 if (ret == 0 && vma->fence) 2151 *out_flags |= PLANE_HAS_FENCE; 2152 } 2153 2154 i915_vma_get(vma); 2155 err: 2156 atomic_dec(&dev_priv->gpu_error.pending_fb_pin); 2157 2158 i915_gem_object_unlock(obj); 2159 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2160 return vma; 2161 } 2162 2163 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) 2164 { 2165 i915_gem_object_lock(vma->obj); 2166 if (flags & PLANE_HAS_FENCE) 2167 i915_vma_unpin_fence(vma); 2168 i915_gem_object_unpin_from_display_plane(vma); 2169 i915_gem_object_unlock(vma->obj); 2170 2171 i915_vma_put(vma); 2172 } 2173 2174 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane, 2175 unsigned int rotation) 2176 { 2177 if (drm_rotation_90_or_270(rotation)) 2178 return to_intel_framebuffer(fb)->rotated[color_plane].pitch; 2179 else 2180 return fb->pitches[color_plane]; 2181 } 2182 2183 /* 2184 * Convert the x/y offsets into a linear offset. 2185 * Only valid with 0/180 degree rotation, which is fine since linear 2186 * offset is only used with linear buffers on pre-hsw and tiled buffers 2187 * with gen2/3, and 90/270 degree rotation isn't supported on any of them. 2188 */ 2189 u32 intel_fb_xy_to_linear(int x, int y, 2190 const struct intel_plane_state *state, 2191 int color_plane) 2192 { 2193 const struct drm_framebuffer *fb = state->base.fb; 2194 unsigned int cpp = fb->format->cpp[color_plane]; 2195 unsigned int pitch = state->color_plane[color_plane].stride; 2196 2197 return y * pitch + x * cpp; 2198 } 2199 2200 /* 2201 * Add the x/y offsets derived from fb->offsets[] to the user 2202 * specified plane src x/y offsets. The resulting x/y offsets 2203 * specify the start of scanout from the beginning of the gtt mapping.
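 * As an illustrative example: if color plane 0 sits at normal-view
 * offsets (x,y) = (0,64) and the caller passes in a src origin of
 * (16,8), the resulting scanout start is (16,72).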
2204 */ 2205 void intel_add_fb_offsets(int *x, int *y, 2206 const struct intel_plane_state *state, 2207 int color_plane) 2208 2209 { 2210 *x += state->color_plane[color_plane].x; 2211 *y += state->color_plane[color_plane].y; 2212 } 2213 2214 static u32 intel_adjust_tile_offset(int *x, int *y, 2215 unsigned int tile_width, 2216 unsigned int tile_height, 2217 unsigned int tile_size, 2218 unsigned int pitch_tiles, 2219 u32 old_offset, 2220 u32 new_offset) 2221 { 2222 unsigned int pitch_pixels = pitch_tiles * tile_width; 2223 unsigned int tiles; 2224 2225 WARN_ON(old_offset & (tile_size - 1)); 2226 WARN_ON(new_offset & (tile_size - 1)); 2227 WARN_ON(new_offset > old_offset); 2228 2229 tiles = (old_offset - new_offset) / tile_size; 2230 2231 *y += tiles / pitch_tiles * tile_height; 2232 *x += tiles % pitch_tiles * tile_width; 2233 2234 /* minimize x in case it got needlessly big */ 2235 *y += *x / pitch_pixels * tile_height; 2236 *x %= pitch_pixels; 2237 2238 return new_offset; 2239 } 2240 2241 static bool is_surface_linear(u64 modifier, int color_plane) 2242 { 2243 return modifier == DRM_FORMAT_MOD_LINEAR; 2244 } 2245 2246 static u32 intel_adjust_aligned_offset(int *x, int *y, 2247 const struct drm_framebuffer *fb, 2248 int color_plane, 2249 unsigned int rotation, 2250 unsigned int pitch, 2251 u32 old_offset, u32 new_offset) 2252 { 2253 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2254 unsigned int cpp = fb->format->cpp[color_plane]; 2255 2256 WARN_ON(new_offset > old_offset); 2257 2258 if (!is_surface_linear(fb->modifier, color_plane)) { 2259 unsigned int tile_size, tile_width, tile_height; 2260 unsigned int pitch_tiles; 2261 2262 tile_size = intel_tile_size(dev_priv); 2263 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2264 2265 if (drm_rotation_90_or_270(rotation)) { 2266 pitch_tiles = pitch / tile_height; 2267 swap(tile_width, tile_height); 2268 } else { 2269 pitch_tiles = pitch / (tile_width * cpp); 2270 } 2271 2272 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2273 tile_size, pitch_tiles, 2274 old_offset, new_offset); 2275 } else { 2276 old_offset += *y * pitch + *x * cpp; 2277 2278 *y = (old_offset - new_offset) / pitch; 2279 *x = ((old_offset - new_offset) - *y * pitch) / cpp; 2280 } 2281 2282 return new_offset; 2283 } 2284 2285 /* 2286 * Adjust the tile offset by moving the difference into 2287 * the x/y offsets. 2288 */ 2289 static u32 intel_plane_adjust_aligned_offset(int *x, int *y, 2290 const struct intel_plane_state *state, 2291 int color_plane, 2292 u32 old_offset, u32 new_offset) 2293 { 2294 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane, 2295 state->base.rotation, 2296 state->color_plane[color_plane].stride, 2297 old_offset, new_offset); 2298 } 2299 2300 /* 2301 * Computes the aligned offset to the base tile and adjusts 2302 * x, y. bytes per pixel is assumed to be a power-of-two. 2303 * 2304 * In the 90/270 rotated case, x and y are assumed 2305 * to be already rotated to match the rotated GTT view, and 2306 * pitch is the tile_height aligned framebuffer height. 2307 * 2308 * This function is used when computing the derived information 2309 * under intel_framebuffer, so using any of that information 2310 * here is not allowed. Anything under drm_framebuffer can be 2311 * used. This is why the user has to pass in the pitch since it 2312 * is specified in the rotated orientation. 
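 * Worked example with illustrative numbers: gen4+ X-tiling uses
 * 512-byte x 8-row tiles, i.e. 128x8 pixels at cpp=4. With a pitch
 * of 4096 bytes (pitch_tiles = 8) and (x,y) = (200,30) we get
 * tile_rows = 3 and tiles = 1, so offset = (3 * 8 + 1) * 4096 =
 * 102400 bytes, with (x,y) reduced to (72,6) inside the base tile.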
2313 */ 2314 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, 2315 int *x, int *y, 2316 const struct drm_framebuffer *fb, 2317 int color_plane, 2318 unsigned int pitch, 2319 unsigned int rotation, 2320 u32 alignment) 2321 { 2322 unsigned int cpp = fb->format->cpp[color_plane]; 2323 u32 offset, offset_aligned; 2324 2325 if (alignment) 2326 alignment--; 2327 2328 if (!is_surface_linear(fb->modifier, color_plane)) { 2329 unsigned int tile_size, tile_width, tile_height; 2330 unsigned int tile_rows, tiles, pitch_tiles; 2331 2332 tile_size = intel_tile_size(dev_priv); 2333 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2334 2335 if (drm_rotation_90_or_270(rotation)) { 2336 pitch_tiles = pitch / tile_height; 2337 swap(tile_width, tile_height); 2338 } else { 2339 pitch_tiles = pitch / (tile_width * cpp); 2340 } 2341 2342 tile_rows = *y / tile_height; 2343 *y %= tile_height; 2344 2345 tiles = *x / tile_width; 2346 *x %= tile_width; 2347 2348 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2349 offset_aligned = offset & ~alignment; 2350 2351 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2352 tile_size, pitch_tiles, 2353 offset, offset_aligned); 2354 } else { 2355 offset = *y * pitch + *x * cpp; 2356 offset_aligned = offset & ~alignment; 2357 2358 *y = (offset & alignment) / pitch; 2359 *x = ((offset & alignment) - *y * pitch) / cpp; 2360 } 2361 2362 return offset_aligned; 2363 } 2364 2365 static u32 intel_plane_compute_aligned_offset(int *x, int *y, 2366 const struct intel_plane_state *state, 2367 int color_plane) 2368 { 2369 struct intel_plane *intel_plane = to_intel_plane(state->base.plane); 2370 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 2371 const struct drm_framebuffer *fb = state->base.fb; 2372 unsigned int rotation = state->base.rotation; 2373 int pitch = state->color_plane[color_plane].stride; 2374 u32 alignment; 2375 2376 if (intel_plane->id == PLANE_CURSOR) 2377 alignment = intel_cursor_alignment(dev_priv); 2378 else 2379 alignment = intel_surf_alignment(fb, color_plane); 2380 2381 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane, 2382 pitch, rotation, alignment); 2383 } 2384 2385 /* Convert the fb->offset[] into x/y offsets */ 2386 static int intel_fb_offset_to_xy(int *x, int *y, 2387 const struct drm_framebuffer *fb, 2388 int color_plane) 2389 { 2390 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2391 unsigned int height; 2392 2393 if (fb->modifier != DRM_FORMAT_MOD_LINEAR && 2394 fb->offsets[color_plane] % intel_tile_size(dev_priv)) { 2395 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n", 2396 fb->offsets[color_plane], color_plane); 2397 return -EINVAL; 2398 } 2399 2400 height = drm_framebuffer_plane_height(fb->height, fb, color_plane); 2401 height = ALIGN(height, intel_tile_height(fb, color_plane)); 2402 2403 /* Catch potential overflows early */ 2404 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]), 2405 fb->offsets[color_plane])) { 2406 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n", 2407 fb->offsets[color_plane], fb->pitches[color_plane], 2408 color_plane); 2409 return -ERANGE; 2410 } 2411 2412 *x = 0; 2413 *y = 0; 2414 2415 intel_adjust_aligned_offset(x, y, 2416 fb, color_plane, DRM_MODE_ROTATE_0, 2417 fb->pitches[color_plane], 2418 fb->offsets[color_plane], 0); 2419 2420 return 0; 2421 } 2422 2423 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) 2424 { 2425 switch (fb_modifier) { 2426 case 
I915_FORMAT_MOD_X_TILED: 2427 return I915_TILING_X; 2428 case I915_FORMAT_MOD_Y_TILED: 2429 case I915_FORMAT_MOD_Y_TILED_CCS: 2430 return I915_TILING_Y; 2431 default: 2432 return I915_TILING_NONE; 2433 } 2434 } 2435 2436 /* 2437 * From the Sky Lake PRM: 2438 * "The Color Control Surface (CCS) contains the compression status of 2439 * the cache-line pairs. The compression state of the cache-line pair 2440 * is specified by 2 bits in the CCS. Each CCS cache-line represents 2441 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled 2442 * cache-line-pairs. CCS is always Y tiled." 2443 * 2444 * Since cache line pairs refers to horizontally adjacent cache lines, 2445 * each cache line in the CCS corresponds to an area of 32x16 cache 2446 * lines on the main surface. Since each pixel is 4 bytes, this gives 2447 * us a ratio of one byte in the CCS for each 8x16 pixels in the 2448 * main surface. 2449 */ 2450 static const struct drm_format_info ccs_formats[] = { 2451 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 2452 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2453 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 2454 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2455 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 2456 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2457 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 2458 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2459 }; 2460 2461 static const struct drm_format_info * 2462 lookup_format_info(const struct drm_format_info formats[], 2463 int num_formats, u32 format) 2464 { 2465 int i; 2466 2467 for (i = 0; i < num_formats; i++) { 2468 if (formats[i].format == format) 2469 return &formats[i]; 2470 } 2471 2472 return NULL; 2473 } 2474 2475 static const struct drm_format_info * 2476 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 2477 { 2478 switch (cmd->modifier[0]) { 2479 case I915_FORMAT_MOD_Y_TILED_CCS: 2480 case I915_FORMAT_MOD_Yf_TILED_CCS: 2481 return lookup_format_info(ccs_formats, 2482 ARRAY_SIZE(ccs_formats), 2483 cmd->pixel_format); 2484 default: 2485 return NULL; 2486 } 2487 } 2488 2489 bool is_ccs_modifier(u64 modifier) 2490 { 2491 return modifier == I915_FORMAT_MOD_Y_TILED_CCS || 2492 modifier == I915_FORMAT_MOD_Yf_TILED_CCS; 2493 } 2494 2495 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, 2496 u32 pixel_format, u64 modifier) 2497 { 2498 struct intel_crtc *crtc; 2499 struct intel_plane *plane; 2500 2501 /* 2502 * We assume the primary plane for pipe A has 2503 * the highest stride limits of them all. 2504 */ 2505 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); 2506 plane = to_intel_plane(crtc->base.primary); 2507 2508 return plane->max_stride(plane, pixel_format, modifier, 2509 DRM_MODE_ROTATE_0); 2510 } 2511 2512 static 2513 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, 2514 u32 pixel_format, u64 modifier) 2515 { 2516 /* 2517 * Arbitrary limit for gen4+ chosen to match the 2518 * render engine max stride. 
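 * (256KiB on gen7+, 128KiB on gen4-6, matching the values below.)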
2519 * 2520 * The new CCS hash mode makes remapping impossible 2521 */ 2522 if (!is_ccs_modifier(modifier)) { 2523 if (INTEL_GEN(dev_priv) >= 7) 2524 return 256*1024; 2525 else if (INTEL_GEN(dev_priv) >= 4) 2526 return 128*1024; 2527 } 2528 2529 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); 2530 } 2531 2532 static u32 2533 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) 2534 { 2535 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2536 2537 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { 2538 u32 max_stride = intel_plane_fb_max_stride(dev_priv, 2539 fb->format->format, 2540 fb->modifier); 2541 2542 /* 2543 * To make remapping with linear generally feasible 2544 * we need the stride to be page aligned. 2545 */ 2546 if (fb->pitches[color_plane] > max_stride) 2547 return intel_tile_size(dev_priv); 2548 else 2549 return 64; 2550 } else { 2551 return intel_tile_width_bytes(fb, color_plane); 2552 } 2553 } 2554 2555 bool intel_plane_can_remap(const struct intel_plane_state *plane_state) 2556 { 2557 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2558 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2559 const struct drm_framebuffer *fb = plane_state->base.fb; 2560 int i; 2561 2562 /* We don't want to deal with remapping with cursors */ 2563 if (plane->id == PLANE_CURSOR) 2564 return false; 2565 2566 /* 2567 * The display engine limits already match/exceed the 2568 * render engine limits, so not much point in remapping. 2569 * Would also need to deal with the fence POT alignment 2570 * and gen2 2KiB GTT tile size. 2571 */ 2572 if (INTEL_GEN(dev_priv) < 4) 2573 return false; 2574 2575 /* 2576 * The new CCS hash mode isn't compatible with remapping as 2577 * the virtual address of the pages affects the compressed data. 2578 */ 2579 if (is_ccs_modifier(fb->modifier)) 2580 return false; 2581 2582 /* Linear needs a page aligned stride for remapping */ 2583 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { 2584 unsigned int alignment = intel_tile_size(dev_priv) - 1; 2585 2586 for (i = 0; i < fb->format->num_planes; i++) { 2587 if (fb->pitches[i] & alignment) 2588 return false; 2589 } 2590 } 2591 2592 return true; 2593 } 2594 2595 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) 2596 { 2597 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2598 const struct drm_framebuffer *fb = plane_state->base.fb; 2599 unsigned int rotation = plane_state->base.rotation; 2600 u32 stride, max_stride; 2601 2602 /* 2603 * No remapping for invisible planes since we don't have 2604 * an actual source viewport to remap. 2605 */ 2606 if (!plane_state->base.visible) 2607 return false; 2608 2609 if (!intel_plane_can_remap(plane_state)) 2610 return false; 2611 2612 /* 2613 * FIXME: aux plane limits on gen9+ are 2614 * unclear in Bspec, for now no checking. 
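 * The check below therefore only compares color plane 0's stride
 * against plane->max_stride() for this format/modifier/rotation.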
2615 */ 2616 stride = intel_fb_pitch(fb, 0, rotation); 2617 max_stride = plane->max_stride(plane, fb->format->format, 2618 fb->modifier, rotation); 2619 2620 return stride > max_stride; 2621 } 2622 2623 static int 2624 intel_fill_fb_info(struct drm_i915_private *dev_priv, 2625 struct drm_framebuffer *fb) 2626 { 2627 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2628 struct intel_rotation_info *rot_info = &intel_fb->rot_info; 2629 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2630 u32 gtt_offset_rotated = 0; 2631 unsigned int max_size = 0; 2632 int i, num_planes = fb->format->num_planes; 2633 unsigned int tile_size = intel_tile_size(dev_priv); 2634 2635 for (i = 0; i < num_planes; i++) { 2636 unsigned int width, height; 2637 unsigned int cpp, size; 2638 u32 offset; 2639 int x, y; 2640 int ret; 2641 2642 cpp = fb->format->cpp[i]; 2643 width = drm_framebuffer_plane_width(fb->width, fb, i); 2644 height = drm_framebuffer_plane_height(fb->height, fb, i); 2645 2646 ret = intel_fb_offset_to_xy(&x, &y, fb, i); 2647 if (ret) { 2648 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2649 i, fb->offsets[i]); 2650 return ret; 2651 } 2652 2653 if (is_ccs_modifier(fb->modifier) && i == 1) { 2654 int hsub = fb->format->hsub; 2655 int vsub = fb->format->vsub; 2656 int tile_width, tile_height; 2657 int main_x, main_y; 2658 int ccs_x, ccs_y; 2659 2660 intel_tile_dims(fb, i, &tile_width, &tile_height); 2661 tile_width *= hsub; 2662 tile_height *= vsub; 2663 2664 ccs_x = (x * hsub) % tile_width; 2665 ccs_y = (y * vsub) % tile_height; 2666 main_x = intel_fb->normal[0].x % tile_width; 2667 main_y = intel_fb->normal[0].y % tile_height; 2668 2669 /* 2670 * CCS doesn't have its own x/y offset register, so the intra CCS tile 2671 * x/y offsets must match between CCS and the main surface. 2672 */ 2673 if (main_x != ccs_x || main_y != ccs_y) { 2674 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", 2675 main_x, main_y, 2676 ccs_x, ccs_y, 2677 intel_fb->normal[0].x, 2678 intel_fb->normal[0].y, 2679 x, y); 2680 return -EINVAL; 2681 } 2682 } 2683 2684 /* 2685 * The fence (if used) is aligned to the start of the object 2686 * so having the framebuffer wrap around across the edge of the 2687 * fenced region doesn't really work. We have no API to configure 2688 * the fence start offset within the object (nor could we probably 2689 * on gen2/3). So it's just easier if we just require that the 2690 * fb layout agrees with the fence layout. We already check that the 2691 * fb stride matches the fence stride elsewhere. 2692 */ 2693 if (i == 0 && i915_gem_object_is_tiled(obj) && 2694 (x + width) * cpp > fb->pitches[i]) { 2695 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2696 i, fb->offsets[i]); 2697 return -EINVAL; 2698 } 2699 2700 /* 2701 * First pixel of the framebuffer from 2702 * the start of the normal gtt mapping. 
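 * (Stored in intel_fb->normal[i]; both the remapping code and
 * intel_plane_compute_gtt() read these back later.)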
2703 */ 2704 intel_fb->normal[i].x = x; 2705 intel_fb->normal[i].y = y; 2706 2707 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i, 2708 fb->pitches[i], 2709 DRM_MODE_ROTATE_0, 2710 tile_size); 2711 offset /= tile_size; 2712 2713 if (!is_surface_linear(fb->modifier, i)) { 2714 unsigned int tile_width, tile_height; 2715 unsigned int pitch_tiles; 2716 struct drm_rect r; 2717 2718 intel_tile_dims(fb, i, &tile_width, &tile_height); 2719 2720 rot_info->plane[i].offset = offset; 2721 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); 2722 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 2723 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 2724 2725 intel_fb->rotated[i].pitch = 2726 rot_info->plane[i].height * tile_height; 2727 2728 /* how many tiles does this plane need */ 2729 size = rot_info->plane[i].stride * rot_info->plane[i].height; 2730 /* 2731 * If the plane isn't horizontally tile aligned, 2732 * we need one more tile. 2733 */ 2734 if (x != 0) 2735 size++; 2736 2737 /* rotate the x/y offsets to match the GTT view */ 2738 r.x1 = x; 2739 r.y1 = y; 2740 r.x2 = x + width; 2741 r.y2 = y + height; 2742 drm_rect_rotate(&r, 2743 rot_info->plane[i].width * tile_width, 2744 rot_info->plane[i].height * tile_height, 2745 DRM_MODE_ROTATE_270); 2746 x = r.x1; 2747 y = r.y1; 2748 2749 /* rotate the tile dimensions to match the GTT view */ 2750 pitch_tiles = intel_fb->rotated[i].pitch / tile_height; 2751 swap(tile_width, tile_height); 2752 2753 /* 2754 * We only keep the x/y offsets, so push all of the 2755 * gtt offset into the x/y offsets. 2756 */ 2757 intel_adjust_tile_offset(&x, &y, 2758 tile_width, tile_height, 2759 tile_size, pitch_tiles, 2760 gtt_offset_rotated * tile_size, 0); 2761 2762 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; 2763 2764 /* 2765 * First pixel of the framebuffer from 2766 * the start of the rotated gtt mapping. 2767 */ 2768 intel_fb->rotated[i].x = x; 2769 intel_fb->rotated[i].y = y; 2770 } else { 2771 size = DIV_ROUND_UP((y + height) * fb->pitches[i] + 2772 x * cpp, tile_size); 2773 } 2774 2775 /* how many tiles in total needed in the bo */ 2776 max_size = max(max_size, offset + size); 2777 } 2778 2779 if (mul_u32_u32(max_size, tile_size) > obj->base.size) { 2780 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n", 2781 mul_u32_u32(max_size, tile_size), obj->base.size); 2782 return -EINVAL; 2783 } 2784 2785 return 0; 2786 } 2787 2788 static void 2789 intel_plane_remap_gtt(struct intel_plane_state *plane_state) 2790 { 2791 struct drm_i915_private *dev_priv = 2792 to_i915(plane_state->base.plane->dev); 2793 struct drm_framebuffer *fb = plane_state->base.fb; 2794 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2795 struct intel_rotation_info *info = &plane_state->view.rotated; 2796 unsigned int rotation = plane_state->base.rotation; 2797 int i, num_planes = fb->format->num_planes; 2798 unsigned int tile_size = intel_tile_size(dev_priv); 2799 unsigned int src_x, src_y; 2800 unsigned int src_w, src_h; 2801 u32 gtt_offset = 0; 2802 2803 memset(&plane_state->view, 0, sizeof(plane_state->view)); 2804 plane_state->view.type = drm_rotation_90_or_270(rotation) ? 
2805 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED; 2806 2807 src_x = plane_state->base.src.x1 >> 16; 2808 src_y = plane_state->base.src.y1 >> 16; 2809 src_w = drm_rect_width(&plane_state->base.src) >> 16; 2810 src_h = drm_rect_height(&plane_state->base.src) >> 16; 2811 2812 WARN_ON(is_ccs_modifier(fb->modifier)); 2813 2814 /* Make src coordinates relative to the viewport */ 2815 drm_rect_translate(&plane_state->base.src, 2816 -(src_x << 16), -(src_y << 16)); 2817 2818 /* Rotate src coordinates to match rotated GTT view */ 2819 if (drm_rotation_90_or_270(rotation)) 2820 drm_rect_rotate(&plane_state->base.src, 2821 src_w << 16, src_h << 16, 2822 DRM_MODE_ROTATE_270); 2823 2824 for (i = 0; i < num_planes; i++) { 2825 unsigned int hsub = i ? fb->format->hsub : 1; 2826 unsigned int vsub = i ? fb->format->vsub : 1; 2827 unsigned int cpp = fb->format->cpp[i]; 2828 unsigned int tile_width, tile_height; 2829 unsigned int width, height; 2830 unsigned int pitch_tiles; 2831 unsigned int x, y; 2832 u32 offset; 2833 2834 intel_tile_dims(fb, i, &tile_width, &tile_height); 2835 2836 x = src_x / hsub; 2837 y = src_y / vsub; 2838 width = src_w / hsub; 2839 height = src_h / vsub; 2840 2841 /* 2842 * First pixel of the src viewport from the 2843 * start of the normal gtt mapping. 2844 */ 2845 x += intel_fb->normal[i].x; 2846 y += intel_fb->normal[i].y; 2847 2848 offset = intel_compute_aligned_offset(dev_priv, &x, &y, 2849 fb, i, fb->pitches[i], 2850 DRM_MODE_ROTATE_0, tile_size); 2851 offset /= tile_size; 2852 2853 info->plane[i].offset = offset; 2854 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], 2855 tile_width * cpp); 2856 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 2857 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 2858 2859 if (drm_rotation_90_or_270(rotation)) { 2860 struct drm_rect r; 2861 2862 /* rotate the x/y offsets to match the GTT view */ 2863 r.x1 = x; 2864 r.y1 = y; 2865 r.x2 = x + width; 2866 r.y2 = y + height; 2867 drm_rect_rotate(&r, 2868 info->plane[i].width * tile_width, 2869 info->plane[i].height * tile_height, 2870 DRM_MODE_ROTATE_270); 2871 x = r.x1; 2872 y = r.y1; 2873 2874 pitch_tiles = info->plane[i].height; 2875 plane_state->color_plane[i].stride = pitch_tiles * tile_height; 2876 2877 /* rotate the tile dimensions to match the GTT view */ 2878 swap(tile_width, tile_height); 2879 } else { 2880 pitch_tiles = info->plane[i].width; 2881 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp; 2882 } 2883 2884 /* 2885 * We only keep the x/y offsets, so push all of the 2886 * gtt offset into the x/y offsets. 
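 * (color_plane[i].offset is left at zero below; the surface base
 * is expressed purely through the x/y offsets into the remapped
 * view.)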
2887 */ 2888 intel_adjust_tile_offset(&x, &y, 2889 tile_width, tile_height, 2890 tile_size, pitch_tiles, 2891 gtt_offset * tile_size, 0); 2892 2893 gtt_offset += info->plane[i].width * info->plane[i].height; 2894 2895 plane_state->color_plane[i].offset = 0; 2896 plane_state->color_plane[i].x = x; 2897 plane_state->color_plane[i].y = y; 2898 } 2899 } 2900 2901 static int 2902 intel_plane_compute_gtt(struct intel_plane_state *plane_state) 2903 { 2904 const struct intel_framebuffer *fb = 2905 to_intel_framebuffer(plane_state->base.fb); 2906 unsigned int rotation = plane_state->base.rotation; 2907 int i, num_planes; 2908 2909 if (!fb) 2910 return 0; 2911 2912 num_planes = fb->base.format->num_planes; 2913 2914 if (intel_plane_needs_remap(plane_state)) { 2915 intel_plane_remap_gtt(plane_state); 2916 2917 /* 2918 * Sometimes even remapping can't overcome 2919 * the stride limitations :( Can happen with 2920 * big plane sizes and suitably misaligned 2921 * offsets. 2922 */ 2923 return intel_plane_check_stride(plane_state); 2924 } 2925 2926 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation); 2927 2928 for (i = 0; i < num_planes; i++) { 2929 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation); 2930 plane_state->color_plane[i].offset = 0; 2931 2932 if (drm_rotation_90_or_270(rotation)) { 2933 plane_state->color_plane[i].x = fb->rotated[i].x; 2934 plane_state->color_plane[i].y = fb->rotated[i].y; 2935 } else { 2936 plane_state->color_plane[i].x = fb->normal[i].x; 2937 plane_state->color_plane[i].y = fb->normal[i].y; 2938 } 2939 } 2940 2941 /* Rotate src coordinates to match rotated GTT view */ 2942 if (drm_rotation_90_or_270(rotation)) 2943 drm_rect_rotate(&plane_state->base.src, 2944 fb->base.width << 16, fb->base.height << 16, 2945 DRM_MODE_ROTATE_270); 2946 2947 return intel_plane_check_stride(plane_state); 2948 } 2949 2950 static int i9xx_format_to_fourcc(int format) 2951 { 2952 switch (format) { 2953 case DISPPLANE_8BPP: 2954 return DRM_FORMAT_C8; 2955 case DISPPLANE_BGRX555: 2956 return DRM_FORMAT_XRGB1555; 2957 case DISPPLANE_BGRX565: 2958 return DRM_FORMAT_RGB565; 2959 default: 2960 case DISPPLANE_BGRX888: 2961 return DRM_FORMAT_XRGB8888; 2962 case DISPPLANE_RGBX888: 2963 return DRM_FORMAT_XBGR8888; 2964 case DISPPLANE_BGRX101010: 2965 return DRM_FORMAT_XRGB2101010; 2966 case DISPPLANE_RGBX101010: 2967 return DRM_FORMAT_XBGR2101010; 2968 } 2969 } 2970 2971 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 2972 { 2973 switch (format) { 2974 case PLANE_CTL_FORMAT_RGB_565: 2975 return DRM_FORMAT_RGB565; 2976 case PLANE_CTL_FORMAT_NV12: 2977 return DRM_FORMAT_NV12; 2978 case PLANE_CTL_FORMAT_P010: 2979 return DRM_FORMAT_P010; 2980 case PLANE_CTL_FORMAT_P012: 2981 return DRM_FORMAT_P012; 2982 case PLANE_CTL_FORMAT_P016: 2983 return DRM_FORMAT_P016; 2984 case PLANE_CTL_FORMAT_Y210: 2985 return DRM_FORMAT_Y210; 2986 case PLANE_CTL_FORMAT_Y212: 2987 return DRM_FORMAT_Y212; 2988 case PLANE_CTL_FORMAT_Y216: 2989 return DRM_FORMAT_Y216; 2990 case PLANE_CTL_FORMAT_Y410: 2991 return DRM_FORMAT_XVYU2101010; 2992 case PLANE_CTL_FORMAT_Y412: 2993 return DRM_FORMAT_XVYU12_16161616; 2994 case PLANE_CTL_FORMAT_Y416: 2995 return DRM_FORMAT_XVYU16161616; 2996 default: 2997 case PLANE_CTL_FORMAT_XRGB_8888: 2998 if (rgb_order) { 2999 if (alpha) 3000 return DRM_FORMAT_ABGR8888; 3001 else 3002 return DRM_FORMAT_XBGR8888; 3003 } else { 3004 if (alpha) 3005 return DRM_FORMAT_ARGB8888; 3006 else 3007 return DRM_FORMAT_XRGB8888; 3008 } 3009 case 
PLANE_CTL_FORMAT_XRGB_2101010: 3010 if (rgb_order) 3011 return DRM_FORMAT_XBGR2101010; 3012 else 3013 return DRM_FORMAT_XRGB2101010; 3014 case PLANE_CTL_FORMAT_XRGB_16161616F: 3015 if (rgb_order) { 3016 if (alpha) 3017 return DRM_FORMAT_ABGR16161616F; 3018 else 3019 return DRM_FORMAT_XBGR16161616F; 3020 } else { 3021 if (alpha) 3022 return DRM_FORMAT_ARGB16161616F; 3023 else 3024 return DRM_FORMAT_XRGB16161616F; 3025 } 3026 } 3027 } 3028 3029 static bool 3030 intel_alloc_initial_plane_obj(struct intel_crtc *crtc, 3031 struct intel_initial_plane_config *plane_config) 3032 { 3033 struct drm_device *dev = crtc->base.dev; 3034 struct drm_i915_private *dev_priv = to_i915(dev); 3035 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 3036 struct drm_framebuffer *fb = &plane_config->fb->base; 3037 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); 3038 u32 size_aligned = round_up(plane_config->base + plane_config->size, 3039 PAGE_SIZE); 3040 struct drm_i915_gem_object *obj; 3041 bool ret = false; 3042 3043 size_aligned -= base_aligned; 3044 3045 if (plane_config->size == 0) 3046 return false; 3047 3048 /* If the FB is too big, just don't use it since fbdev is not very 3049 * important and we should probably use that space with FBC or other 3050 * features. */ 3051 if (size_aligned * 2 > dev_priv->stolen_usable_size) 3052 return false; 3053 3054 switch (fb->modifier) { 3055 case DRM_FORMAT_MOD_LINEAR: 3056 case I915_FORMAT_MOD_X_TILED: 3057 case I915_FORMAT_MOD_Y_TILED: 3058 break; 3059 default: 3060 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n", 3061 fb->modifier); 3062 return false; 3063 } 3064 3065 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, 3066 base_aligned, 3067 base_aligned, 3068 size_aligned); 3069 if (IS_ERR(obj)) 3070 return false; 3071 3072 switch (plane_config->tiling) { 3073 case I915_TILING_NONE: 3074 break; 3075 case I915_TILING_X: 3076 case I915_TILING_Y: 3077 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling; 3078 break; 3079 default: 3080 MISSING_CASE(plane_config->tiling); 3081 goto out; 3082 } 3083 3084 mode_cmd.pixel_format = fb->format->format; 3085 mode_cmd.width = fb->width; 3086 mode_cmd.height = fb->height; 3087 mode_cmd.pitches[0] = fb->pitches[0]; 3088 mode_cmd.modifier[0] = fb->modifier; 3089 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 3090 3091 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) { 3092 DRM_DEBUG_KMS("intel fb init failed\n"); 3093 goto out; 3094 } 3095 3096 3097 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); 3098 ret = true; 3099 out: 3100 i915_gem_object_put(obj); 3101 return ret; 3102 } 3103 3104 static void 3105 intel_set_plane_visible(struct intel_crtc_state *crtc_state, 3106 struct intel_plane_state *plane_state, 3107 bool visible) 3108 { 3109 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 3110 3111 plane_state->base.visible = visible; 3112 3113 if (visible) 3114 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); 3115 else 3116 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); 3117 } 3118 3119 static void fixup_active_planes(struct intel_crtc_state *crtc_state) 3120 { 3121 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 3122 struct drm_plane *plane; 3123 3124 /* 3125 * Active_planes aliases if multiple "primary" or cursor planes 3126 * have been used on the same (or wrong) pipe. plane_mask uses 3127 * unique ids, hence we can use that to reconstruct active_planes. 
3128 */ 3129 crtc_state->active_planes = 0; 3130 3131 drm_for_each_plane_mask(plane, &dev_priv->drm, 3132 crtc_state->base.plane_mask) 3133 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); 3134 } 3135 3136 static void intel_plane_disable_noatomic(struct intel_crtc *crtc, 3137 struct intel_plane *plane) 3138 { 3139 struct intel_crtc_state *crtc_state = 3140 to_intel_crtc_state(crtc->base.state); 3141 struct intel_plane_state *plane_state = 3142 to_intel_plane_state(plane->base.state); 3143 3144 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", 3145 plane->base.base.id, plane->base.name, 3146 crtc->base.base.id, crtc->base.name); 3147 3148 intel_set_plane_visible(crtc_state, plane_state, false); 3149 fixup_active_planes(crtc_state); 3150 crtc_state->data_rate[plane->id] = 0; 3151 3152 if (plane->id == PLANE_PRIMARY) 3153 intel_pre_disable_primary_noatomic(&crtc->base); 3154 3155 intel_disable_plane(plane, crtc_state); 3156 } 3157 3158 static struct intel_frontbuffer * 3159 to_intel_frontbuffer(struct drm_framebuffer *fb) 3160 { 3161 return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL; 3162 } 3163 3164 static void 3165 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 3166 struct intel_initial_plane_config *plane_config) 3167 { 3168 struct drm_device *dev = intel_crtc->base.dev; 3169 struct drm_i915_private *dev_priv = to_i915(dev); 3170 struct drm_crtc *c; 3171 struct drm_plane *primary = intel_crtc->base.primary; 3172 struct drm_plane_state *plane_state = primary->state; 3173 struct intel_plane *intel_plane = to_intel_plane(primary); 3174 struct intel_plane_state *intel_state = 3175 to_intel_plane_state(plane_state); 3176 struct drm_framebuffer *fb; 3177 3178 if (!plane_config->fb) 3179 return; 3180 3181 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { 3182 fb = &plane_config->fb->base; 3183 goto valid_fb; 3184 } 3185 3186 kfree(plane_config->fb); 3187 3188 /* 3189 * Failed to alloc the obj, check to see if we should share 3190 * an fb with another CRTC instead 3191 */ 3192 for_each_crtc(dev, c) { 3193 struct intel_plane_state *state; 3194 3195 if (c == &intel_crtc->base) 3196 continue; 3197 3198 if (!to_intel_crtc(c)->active) 3199 continue; 3200 3201 state = to_intel_plane_state(c->primary->state); 3202 if (!state->vma) 3203 continue; 3204 3205 if (intel_plane_ggtt_offset(state) == plane_config->base) { 3206 fb = state->base.fb; 3207 drm_framebuffer_get(fb); 3208 goto valid_fb; 3209 } 3210 } 3211 3212 /* 3213 * We've failed to reconstruct the BIOS FB. Current display state 3214 * indicates that the primary plane is visible, but has a NULL FB, 3215 * which will lead to problems later if we don't fix it up. The 3216 * simplest solution is to just disable the primary plane now and 3217 * pretend the BIOS never had it enabled. 
3218 */ 3219 intel_plane_disable_noatomic(intel_crtc, intel_plane); 3220 3221 return; 3222 3223 valid_fb: 3224 intel_state->base.rotation = plane_config->rotation; 3225 intel_fill_fb_ggtt_view(&intel_state->view, fb, 3226 intel_state->base.rotation); 3227 intel_state->color_plane[0].stride = 3228 intel_fb_pitch(fb, 0, intel_state->base.rotation); 3229 3230 intel_state->vma = 3231 intel_pin_and_fence_fb_obj(fb, 3232 &intel_state->view, 3233 intel_plane_uses_fence(intel_state), 3234 &intel_state->flags); 3235 if (IS_ERR(intel_state->vma)) { 3236 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", 3237 intel_crtc->pipe, PTR_ERR(intel_state->vma)); 3238 3239 intel_state->vma = NULL; 3240 drm_framebuffer_put(fb); 3241 return; 3242 } 3243 3244 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); 3245 3246 plane_state->src_x = 0; 3247 plane_state->src_y = 0; 3248 plane_state->src_w = fb->width << 16; 3249 plane_state->src_h = fb->height << 16; 3250 3251 plane_state->crtc_x = 0; 3252 plane_state->crtc_y = 0; 3253 plane_state->crtc_w = fb->width; 3254 plane_state->crtc_h = fb->height; 3255 3256 intel_state->base.src = drm_plane_state_src(plane_state); 3257 intel_state->base.dst = drm_plane_state_dest(plane_state); 3258 3259 if (plane_config->tiling) 3260 dev_priv->preserve_bios_swizzle = true; 3261 3262 plane_state->fb = fb; 3263 plane_state->crtc = &intel_crtc->base; 3264 3265 atomic_or(to_intel_plane(primary)->frontbuffer_bit, 3266 &to_intel_frontbuffer(fb)->bits); 3267 } 3268 3269 static int skl_max_plane_width(const struct drm_framebuffer *fb, 3270 int color_plane, 3271 unsigned int rotation) 3272 { 3273 int cpp = fb->format->cpp[color_plane]; 3274 3275 switch (fb->modifier) { 3276 case DRM_FORMAT_MOD_LINEAR: 3277 case I915_FORMAT_MOD_X_TILED: 3278 /* 3279 * Validated limit is 4k, but 5k should 3280 * work apart from the following features: 3281 * - Ytile (already limited to 4k) 3282 * - FP16 (already limited to 4k) 3283 * - render compression (already limited to 4k) 3284 * - KVMR sprite and cursor (don't care) 3285 * - horizontal panning (TODO verify this) 3286 * - pipe and plane scaling (TODO verify this) 3287 */ 3288 if (cpp == 8) 3289 return 4096; 3290 else 3291 return 5120; 3292 case I915_FORMAT_MOD_Y_TILED_CCS: 3293 case I915_FORMAT_MOD_Yf_TILED_CCS: 3294 /* FIXME AUX plane? */ 3295 case I915_FORMAT_MOD_Y_TILED: 3296 case I915_FORMAT_MOD_Yf_TILED: 3297 if (cpp == 8) 3298 return 2048; 3299 else 3300 return 4096; 3301 default: 3302 MISSING_CASE(fb->modifier); 3303 return 2048; 3304 } 3305 } 3306 3307 static int glk_max_plane_width(const struct drm_framebuffer *fb, 3308 int color_plane, 3309 unsigned int rotation) 3310 { 3311 int cpp = fb->format->cpp[color_plane]; 3312 3313 switch (fb->modifier) { 3314 case DRM_FORMAT_MOD_LINEAR: 3315 case I915_FORMAT_MOD_X_TILED: 3316 if (cpp == 8) 3317 return 4096; 3318 else 3319 return 5120; 3320 case I915_FORMAT_MOD_Y_TILED_CCS: 3321 case I915_FORMAT_MOD_Yf_TILED_CCS: 3322 /* FIXME AUX plane?
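(CCS falls through to the Y-tile limits below for now.)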
*/ 3323 case I915_FORMAT_MOD_Y_TILED: 3324 case I915_FORMAT_MOD_Yf_TILED: 3325 if (cpp == 8) 3326 return 2048; 3327 else 3328 return 5120; 3329 default: 3330 MISSING_CASE(fb->modifier); 3331 return 2048; 3332 } 3333 } 3334 3335 static int icl_max_plane_width(const struct drm_framebuffer *fb, 3336 int color_plane, 3337 unsigned int rotation) 3338 { 3339 return 5120; 3340 } 3341 3342 static int skl_max_plane_height(void) 3343 { 3344 return 4096; 3345 } 3346 3347 static int icl_max_plane_height(void) 3348 { 3349 return 4320; 3350 } 3351 3352 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, 3353 int main_x, int main_y, u32 main_offset) 3354 { 3355 const struct drm_framebuffer *fb = plane_state->base.fb; 3356 int hsub = fb->format->hsub; 3357 int vsub = fb->format->vsub; 3358 int aux_x = plane_state->color_plane[1].x; 3359 int aux_y = plane_state->color_plane[1].y; 3360 u32 aux_offset = plane_state->color_plane[1].offset; 3361 u32 alignment = intel_surf_alignment(fb, 1); 3362 3363 while (aux_offset >= main_offset && aux_y <= main_y) { 3364 int x, y; 3365 3366 if (aux_x == main_x && aux_y == main_y) 3367 break; 3368 3369 if (aux_offset == 0) 3370 break; 3371 3372 x = aux_x / hsub; 3373 y = aux_y / vsub; 3374 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1, 3375 aux_offset, aux_offset - alignment); 3376 aux_x = x * hsub + aux_x % hsub; 3377 aux_y = y * vsub + aux_y % vsub; 3378 } 3379 3380 if (aux_x != main_x || aux_y != main_y) 3381 return false; 3382 3383 plane_state->color_plane[1].offset = aux_offset; 3384 plane_state->color_plane[1].x = aux_x; 3385 plane_state->color_plane[1].y = aux_y; 3386 3387 return true; 3388 } 3389 3390 static int skl_check_main_surface(struct intel_plane_state *plane_state) 3391 { 3392 struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); 3393 const struct drm_framebuffer *fb = plane_state->base.fb; 3394 unsigned int rotation = plane_state->base.rotation; 3395 int x = plane_state->base.src.x1 >> 16; 3396 int y = plane_state->base.src.y1 >> 16; 3397 int w = drm_rect_width(&plane_state->base.src) >> 16; 3398 int h = drm_rect_height(&plane_state->base.src) >> 16; 3399 int max_width; 3400 int max_height; 3401 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset; 3402 3403 if (INTEL_GEN(dev_priv) >= 11) 3404 max_width = icl_max_plane_width(fb, 0, rotation); 3405 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 3406 max_width = glk_max_plane_width(fb, 0, rotation); 3407 else 3408 max_width = skl_max_plane_width(fb, 0, rotation); 3409 3410 if (INTEL_GEN(dev_priv) >= 11) 3411 max_height = icl_max_plane_height(); 3412 else 3413 max_height = skl_max_plane_height(); 3414 3415 if (w > max_width || h > max_height) { 3416 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", 3417 w, h, max_width, max_height); 3418 return -EINVAL; 3419 } 3420 3421 intel_add_fb_offsets(&x, &y, plane_state, 0); 3422 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0); 3423 alignment = intel_surf_alignment(fb, 0); 3424 3425 /* 3426 * AUX surface offset is specified as the distance from the 3427 * main surface offset, and it must be non-negative. Make 3428 * sure that is what we will get. 3429 */ 3430 if (offset > aux_offset) 3431 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3432 offset, aux_offset & ~(alignment - 1)); 3433 3434 /* 3435 * When using an X-tiled surface, the plane blows up 3436 * if the x offset + width exceed the stride. 
3437 * 3438 * TODO: linear and Y-tiled seem fine, Yf untested, 3439 */ 3440 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 3441 int cpp = fb->format->cpp[0]; 3442 3443 while ((x + w) * cpp > plane_state->color_plane[0].stride) { 3444 if (offset == 0) { 3445 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n"); 3446 return -EINVAL; 3447 } 3448 3449 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3450 offset, offset - alignment); 3451 } 3452 } 3453 3454 /* 3455 * CCS AUX surface doesn't have its own x/y offsets, we must make sure 3456 * they match with the main surface x/y offsets. 3457 */ 3458 if (is_ccs_modifier(fb->modifier)) { 3459 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) { 3460 if (offset == 0) 3461 break; 3462 3463 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3464 offset, offset - alignment); 3465 } 3466 3467 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) { 3468 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); 3469 return -EINVAL; 3470 } 3471 } 3472 3473 plane_state->color_plane[0].offset = offset; 3474 plane_state->color_plane[0].x = x; 3475 plane_state->color_plane[0].y = y; 3476 3477 /* 3478 * Put the final coordinates back so that the src 3479 * coordinate checks will see the right values. 3480 */ 3481 drm_rect_translate(&plane_state->base.src, 3482 (x << 16) - plane_state->base.src.x1, 3483 (y << 16) - plane_state->base.src.y1); 3484 3485 return 0; 3486 } 3487 3488 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 3489 { 3490 const struct drm_framebuffer *fb = plane_state->base.fb; 3491 unsigned int rotation = plane_state->base.rotation; 3492 int max_width = skl_max_plane_width(fb, 1, rotation); 3493 int max_height = 4096; 3494 int x = plane_state->base.src.x1 >> 17; 3495 int y = plane_state->base.src.y1 >> 17; 3496 int w = drm_rect_width(&plane_state->base.src) >> 17; 3497 int h = drm_rect_height(&plane_state->base.src) >> 17; 3498 u32 offset; 3499 3500 intel_add_fb_offsets(&x, &y, plane_state, 1); 3501 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); 3502 3503 /* FIXME not quite sure how/if these apply to the chroma plane */ 3504 if (w > max_width || h > max_height) { 3505 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", 3506 w, h, max_width, max_height); 3507 return -EINVAL; 3508 } 3509 3510 plane_state->color_plane[1].offset = offset; 3511 plane_state->color_plane[1].x = x; 3512 plane_state->color_plane[1].y = y; 3513 3514 return 0; 3515 } 3516 3517 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) 3518 { 3519 const struct drm_framebuffer *fb = plane_state->base.fb; 3520 int src_x = plane_state->base.src.x1 >> 16; 3521 int src_y = plane_state->base.src.y1 >> 16; 3522 int hsub = fb->format->hsub; 3523 int vsub = fb->format->vsub; 3524 int x = src_x / hsub; 3525 int y = src_y / vsub; 3526 u32 offset; 3527 3528 intel_add_fb_offsets(&x, &y, plane_state, 1); 3529 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); 3530 3531 plane_state->color_plane[1].offset = offset; 3532 plane_state->color_plane[1].x = x * hsub + src_x % hsub; 3533 plane_state->color_plane[1].y = y * vsub + src_y % vsub; 3534 3535 return 0; 3536 } 3537 3538 int skl_check_plane_surface(struct intel_plane_state *plane_state) 3539 { 3540 const struct drm_framebuffer *fb = plane_state->base.fb; 3541 int ret; 3542 3543 ret = intel_plane_compute_gtt(plane_state); 
3544 if (ret) 3545 return ret; 3546 3547 if (!plane_state->base.visible) 3548 return 0; 3549 3550 /* 3551 * Handle the AUX surface first since 3552 * the main surface setup depends on it. 3553 */ 3554 if (drm_format_info_is_yuv_semiplanar(fb->format)) { 3555 ret = skl_check_nv12_aux_surface(plane_state); 3556 if (ret) 3557 return ret; 3558 } else if (is_ccs_modifier(fb->modifier)) { 3559 ret = skl_check_ccs_aux_surface(plane_state); 3560 if (ret) 3561 return ret; 3562 } else { 3563 plane_state->color_plane[1].offset = ~0xfff; 3564 plane_state->color_plane[1].x = 0; 3565 plane_state->color_plane[1].y = 0; 3566 } 3567 3568 ret = skl_check_main_surface(plane_state); 3569 if (ret) 3570 return ret; 3571 3572 return 0; 3573 } 3574 3575 unsigned int 3576 i9xx_plane_max_stride(struct intel_plane *plane, 3577 u32 pixel_format, u64 modifier, 3578 unsigned int rotation) 3579 { 3580 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3581 3582 if (!HAS_GMCH(dev_priv)) { 3583 return 32*1024; 3584 } else if (INTEL_GEN(dev_priv) >= 4) { 3585 if (modifier == I915_FORMAT_MOD_X_TILED) 3586 return 16*1024; 3587 else 3588 return 32*1024; 3589 } else if (INTEL_GEN(dev_priv) >= 3) { 3590 if (modifier == I915_FORMAT_MOD_X_TILED) 3591 return 8*1024; 3592 else 3593 return 16*1024; 3594 } else { 3595 if (plane->i9xx_plane == PLANE_C) 3596 return 4*1024; 3597 else 3598 return 8*1024; 3599 } 3600 } 3601 3602 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 3603 { 3604 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 3605 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3606 u32 dspcntr = 0; 3607 3608 if (crtc_state->gamma_enable) 3609 dspcntr |= DISPPLANE_GAMMA_ENABLE; 3610 3611 if (crtc_state->csc_enable) 3612 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 3613 3614 if (INTEL_GEN(dev_priv) < 5) 3615 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 3616 3617 return dspcntr; 3618 } 3619 3620 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 3621 const struct intel_plane_state *plane_state) 3622 { 3623 struct drm_i915_private *dev_priv = 3624 to_i915(plane_state->base.plane->dev); 3625 const struct drm_framebuffer *fb = plane_state->base.fb; 3626 unsigned int rotation = plane_state->base.rotation; 3627 u32 dspcntr; 3628 3629 dspcntr = DISPLAY_PLANE_ENABLE; 3630 3631 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) || 3632 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 3633 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 3634 3635 switch (fb->format->format) { 3636 case DRM_FORMAT_C8: 3637 dspcntr |= DISPPLANE_8BPP; 3638 break; 3639 case DRM_FORMAT_XRGB1555: 3640 dspcntr |= DISPPLANE_BGRX555; 3641 break; 3642 case DRM_FORMAT_RGB565: 3643 dspcntr |= DISPPLANE_BGRX565; 3644 break; 3645 case DRM_FORMAT_XRGB8888: 3646 dspcntr |= DISPPLANE_BGRX888; 3647 break; 3648 case DRM_FORMAT_XBGR8888: 3649 dspcntr |= DISPPLANE_RGBX888; 3650 break; 3651 case DRM_FORMAT_XRGB2101010: 3652 dspcntr |= DISPPLANE_BGRX101010; 3653 break; 3654 case DRM_FORMAT_XBGR2101010: 3655 dspcntr |= DISPPLANE_RGBX101010; 3656 break; 3657 default: 3658 MISSING_CASE(fb->format->format); 3659 return 0; 3660 } 3661 3662 if (INTEL_GEN(dev_priv) >= 4 && 3663 fb->modifier == I915_FORMAT_MOD_X_TILED) 3664 dspcntr |= DISPPLANE_TILED; 3665 3666 if (rotation & DRM_MODE_ROTATE_180) 3667 dspcntr |= DISPPLANE_ROTATE_180; 3668 3669 if (rotation & DRM_MODE_REFLECT_X) 3670 dspcntr |= DISPPLANE_MIRROR; 3671 3672 return dspcntr; 3673 } 3674 3675 int i9xx_check_plane_surface(struct intel_plane_state *plane_state) 3676 { 
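	/*
	 * Compute the final surface offset and x/y for the plane:
	 * gen4+ programs a tile-aligned base plus x/y tile offsets,
	 * while older platforms fold everything into a single linear
	 * byte offset (see i9xx_update_plane() below).
	 */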
3677 struct drm_i915_private *dev_priv = 3678 to_i915(plane_state->base.plane->dev); 3679 int src_x, src_y; 3680 u32 offset; 3681 int ret; 3682 3683 ret = intel_plane_compute_gtt(plane_state); 3684 if (ret) 3685 return ret; 3686 3687 if (!plane_state->base.visible) 3688 return 0; 3689 3690 src_x = plane_state->base.src.x1 >> 16; 3691 src_y = plane_state->base.src.y1 >> 16; 3692 3693 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 3694 3695 if (INTEL_GEN(dev_priv) >= 4) 3696 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 3697 plane_state, 0); 3698 else 3699 offset = 0; 3700 3701 /* 3702 * Put the final coordinates back so that the src 3703 * coordinate checks will see the right values. 3704 */ 3705 drm_rect_translate(&plane_state->base.src, 3706 (src_x << 16) - plane_state->base.src.x1, 3707 (src_y << 16) - plane_state->base.src.y1); 3708 3709 /* HSW/BDW do this automagically in hardware */ 3710 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 3711 unsigned int rotation = plane_state->base.rotation; 3712 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3713 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3714 3715 if (rotation & DRM_MODE_ROTATE_180) { 3716 src_x += src_w - 1; 3717 src_y += src_h - 1; 3718 } else if (rotation & DRM_MODE_REFLECT_X) { 3719 src_x += src_w - 1; 3720 } 3721 } 3722 3723 plane_state->color_plane[0].offset = offset; 3724 plane_state->color_plane[0].x = src_x; 3725 plane_state->color_plane[0].y = src_y; 3726 3727 return 0; 3728 } 3729 3730 static bool i9xx_plane_has_windowing(struct intel_plane *plane) 3731 { 3732 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3733 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3734 3735 if (IS_CHERRYVIEW(dev_priv)) 3736 return i9xx_plane == PLANE_B; 3737 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 3738 return false; 3739 else if (IS_GEN(dev_priv, 4)) 3740 return i9xx_plane == PLANE_C; 3741 else 3742 return i9xx_plane == PLANE_B || 3743 i9xx_plane == PLANE_C; 3744 } 3745 3746 static int 3747 i9xx_plane_check(struct intel_crtc_state *crtc_state, 3748 struct intel_plane_state *plane_state) 3749 { 3750 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 3751 int ret; 3752 3753 ret = chv_plane_check_rotation(plane_state); 3754 if (ret) 3755 return ret; 3756 3757 ret = drm_atomic_helper_check_plane_state(&plane_state->base, 3758 &crtc_state->base, 3759 DRM_PLANE_HELPER_NO_SCALING, 3760 DRM_PLANE_HELPER_NO_SCALING, 3761 i9xx_plane_has_windowing(plane), 3762 true); 3763 if (ret) 3764 return ret; 3765 3766 ret = i9xx_check_plane_surface(plane_state); 3767 if (ret) 3768 return ret; 3769 3770 if (!plane_state->base.visible) 3771 return 0; 3772 3773 ret = intel_plane_check_src_coordinates(plane_state); 3774 if (ret) 3775 return ret; 3776 3777 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); 3778 3779 return 0; 3780 } 3781 3782 static void i9xx_update_plane(struct intel_plane *plane, 3783 const struct intel_crtc_state *crtc_state, 3784 const struct intel_plane_state *plane_state) 3785 { 3786 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3787 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3788 u32 linear_offset; 3789 int x = plane_state->color_plane[0].x; 3790 int y = plane_state->color_plane[0].y; 3791 int crtc_x = plane_state->base.dst.x1; 3792 int crtc_y = plane_state->base.dst.y1; 3793 int crtc_w = drm_rect_width(&plane_state->base.dst); 3794 int crtc_h = drm_rect_height(&plane_state->base.dst); 3795 unsigned long 
irqflags; 3796 u32 dspaddr_offset; 3797 u32 dspcntr; 3798 3799 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); 3800 3801 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 3802 3803 if (INTEL_GEN(dev_priv) >= 4) 3804 dspaddr_offset = plane_state->color_plane[0].offset; 3805 else 3806 dspaddr_offset = linear_offset; 3807 3808 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3809 3810 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); 3811 3812 if (INTEL_GEN(dev_priv) < 4) { 3813 /* 3814 * PLANE_A doesn't actually have a full window 3815 * generator but let's assume we still need to 3816 * program whatever is there. 3817 */ 3818 I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 3819 I915_WRITE_FW(DSPSIZE(i9xx_plane), 3820 ((crtc_h - 1) << 16) | (crtc_w - 1)); 3821 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { 3822 I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 3823 I915_WRITE_FW(PRIMSIZE(i9xx_plane), 3824 ((crtc_h - 1) << 16) | (crtc_w - 1)); 3825 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); 3826 } 3827 3828 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3829 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x); 3830 } else if (INTEL_GEN(dev_priv) >= 4) { 3831 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset); 3832 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x); 3833 } 3834 3835 /* 3836 * The control register self-arms if the plane was previously 3837 * disabled. Try to make the plane enable atomic by writing 3838 * the control register just before the surface register. 3839 */ 3840 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 3841 if (INTEL_GEN(dev_priv) >= 4) 3842 I915_WRITE_FW(DSPSURF(i9xx_plane), 3843 intel_plane_ggtt_offset(plane_state) + 3844 dspaddr_offset); 3845 else 3846 I915_WRITE_FW(DSPADDR(i9xx_plane), 3847 intel_plane_ggtt_offset(plane_state) + 3848 dspaddr_offset); 3849 3850 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3851 } 3852 3853 static void i9xx_disable_plane(struct intel_plane *plane, 3854 const struct intel_crtc_state *crtc_state) 3855 { 3856 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3857 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3858 unsigned long irqflags; 3859 u32 dspcntr; 3860 3861 /* 3862 * DSPCNTR pipe gamma enable on g4x+ and pipe csc 3863 * enable on ilk+ affect the pipe bottom color as 3864 * well, so we must configure them even if the plane 3865 * is disabled. 3866 * 3867 * On pre-g4x there is no way to gamma correct the 3868 * pipe bottom color but we'll keep on doing this 3869 * anyway so that the crtc state readout works correctly. 3870 */ 3871 dspcntr = i9xx_plane_ctl_crtc(crtc_state); 3872 3873 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3874 3875 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 3876 if (INTEL_GEN(dev_priv) >= 4) 3877 I915_WRITE_FW(DSPSURF(i9xx_plane), 0); 3878 else 3879 I915_WRITE_FW(DSPADDR(i9xx_plane), 0); 3880 3881 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3882 } 3883 3884 static bool i9xx_plane_get_hw_state(struct intel_plane *plane, 3885 enum pipe *pipe) 3886 { 3887 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3888 enum intel_display_power_domain power_domain; 3889 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3890 intel_wakeref_t wakeref; 3891 bool ret; 3892 u32 val; 3893 3894 /* 3895 * Not 100% correct for planes that can move between pipes, 3896 * but that's only the case for gen2-4 which don't have any 3897 * display power wells. 
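	 *
	 * Concretely: on gen5+ the plane is tied to its pipe, so the
	 * POWER_DOMAIN_PIPE(plane->pipe) lookup below is exact. On the
	 * older GMCH platforms the plane may be selected onto another
	 * pipe (see the DISPPLANE_SEL_PIPE readout below), but since
	 * those platforms have no display power wells the domain we
	 * grab is harmless either way.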
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}

/*
 * This function detaches (i.e. unbinds) unused scalers in hardware
 */
static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	int i;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}

static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	/*
	 * The stride is expressed either in multiples of 64-byte chunks
	 * for linear buffers, or in number of tiles for tiled buffers.
	 */
	if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
		return 64;
	else if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);
	else
		return intel_tile_width_bytes(fb, color_plane);
}

u32 skl_plane_stride(const struct intel_plane_state *plane_state,
		     int color_plane)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 stride = plane_state->color_plane[color_plane].stride;

	if (color_plane >= fb->format->num_planes)
		return 0;

	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}

static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}

static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->base.fb->format->has_alpha)
		return PLANE_CTL_ALPHA_DISABLE;

	switch (plane_state->base.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_CTL_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->base.pixel_blend_mode);
		return PLANE_CTL_ALPHA_DISABLE;
	}
}

static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->base.fb->format->has_alpha)
		return PLANE_COLOR_ALPHA_DISABLE;

	switch (plane_state->base.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_COLOR_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->base.pixel_blend_mode);
		return PLANE_COLOR_ALPHA_DISABLE;
	}
}

static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with
	 * Xrandr, while i915 HW rotation is clockwise; that's why the
	 * values are swapped here.
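	 *
	 * For example, a DRM_MODE_ROTATE_90 (90 degrees counter-clockwise)
	 * request is programmed as the hardware's 270 degree clockwise
	 * rotation, and vice versa; 180 degrees is its own inverse and
	 * maps straight through.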
4098 */ 4099 case DRM_MODE_ROTATE_90: 4100 return PLANE_CTL_ROTATE_270; 4101 case DRM_MODE_ROTATE_180: 4102 return PLANE_CTL_ROTATE_180; 4103 case DRM_MODE_ROTATE_270: 4104 return PLANE_CTL_ROTATE_90; 4105 default: 4106 MISSING_CASE(rotate); 4107 } 4108 4109 return 0; 4110 } 4111 4112 static u32 cnl_plane_ctl_flip(unsigned int reflect) 4113 { 4114 switch (reflect) { 4115 case 0: 4116 break; 4117 case DRM_MODE_REFLECT_X: 4118 return PLANE_CTL_FLIP_HORIZONTAL; 4119 case DRM_MODE_REFLECT_Y: 4120 default: 4121 MISSING_CASE(reflect); 4122 } 4123 4124 return 0; 4125 } 4126 4127 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 4128 { 4129 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 4130 u32 plane_ctl = 0; 4131 4132 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 4133 return plane_ctl; 4134 4135 if (crtc_state->gamma_enable) 4136 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; 4137 4138 if (crtc_state->csc_enable) 4139 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; 4140 4141 return plane_ctl; 4142 } 4143 4144 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, 4145 const struct intel_plane_state *plane_state) 4146 { 4147 struct drm_i915_private *dev_priv = 4148 to_i915(plane_state->base.plane->dev); 4149 const struct drm_framebuffer *fb = plane_state->base.fb; 4150 unsigned int rotation = plane_state->base.rotation; 4151 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 4152 u32 plane_ctl; 4153 4154 plane_ctl = PLANE_CTL_ENABLE; 4155 4156 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { 4157 plane_ctl |= skl_plane_ctl_alpha(plane_state); 4158 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; 4159 4160 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) 4161 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709; 4162 4163 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4164 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE; 4165 } 4166 4167 plane_ctl |= skl_plane_ctl_format(fb->format->format); 4168 plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 4169 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK); 4170 4171 if (INTEL_GEN(dev_priv) >= 10) 4172 plane_ctl |= cnl_plane_ctl_flip(rotation & 4173 DRM_MODE_REFLECT_MASK); 4174 4175 if (key->flags & I915_SET_COLORKEY_DESTINATION) 4176 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; 4177 else if (key->flags & I915_SET_COLORKEY_SOURCE) 4178 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 4179 4180 return plane_ctl; 4181 } 4182 4183 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) 4184 { 4185 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 4186 u32 plane_color_ctl = 0; 4187 4188 if (INTEL_GEN(dev_priv) >= 11) 4189 return plane_color_ctl; 4190 4191 if (crtc_state->gamma_enable) 4192 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; 4193 4194 if (crtc_state->csc_enable) 4195 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; 4196 4197 return plane_color_ctl; 4198 } 4199 4200 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, 4201 const struct intel_plane_state *plane_state) 4202 { 4203 struct drm_i915_private *dev_priv = 4204 to_i915(plane_state->base.plane->dev); 4205 const struct drm_framebuffer *fb = plane_state->base.fb; 4206 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 4207 u32 plane_color_ctl = 0; 4208 4209 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; 4210 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); 4211 4212 if (fb->format->is_yuv && 
!icl_is_hdr_plane(dev_priv, plane->id)) { 4213 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) 4214 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; 4215 else 4216 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709; 4217 4218 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4219 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 4220 } else if (fb->format->is_yuv) { 4221 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; 4222 } 4223 4224 return plane_color_ctl; 4225 } 4226 4227 static int 4228 __intel_display_resume(struct drm_device *dev, 4229 struct drm_atomic_state *state, 4230 struct drm_modeset_acquire_ctx *ctx) 4231 { 4232 struct drm_crtc_state *crtc_state; 4233 struct drm_crtc *crtc; 4234 int i, ret; 4235 4236 intel_modeset_setup_hw_state(dev, ctx); 4237 intel_vga_redisable(to_i915(dev)); 4238 4239 if (!state) 4240 return 0; 4241 4242 /* 4243 * We've duplicated the state, pointers to the old state are invalid. 4244 * 4245 * Don't attempt to use the old state until we commit the duplicated state. 4246 */ 4247 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 4248 /* 4249 * Force recalculation even if we restore 4250 * current state. With fast modeset this may not result 4251 * in a modeset when the state is compatible. 4252 */ 4253 crtc_state->mode_changed = true; 4254 } 4255 4256 /* ignore any reset values/BIOS leftovers in the WM registers */ 4257 if (!HAS_GMCH(to_i915(dev))) 4258 to_intel_atomic_state(state)->skip_intermediate_wm = true; 4259 4260 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 4261 4262 WARN_ON(ret == -EDEADLK); 4263 return ret; 4264 } 4265 4266 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 4267 { 4268 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && 4269 intel_has_gpu_reset(&dev_priv->gt)); 4270 } 4271 4272 void intel_prepare_reset(struct drm_i915_private *dev_priv) 4273 { 4274 struct drm_device *dev = &dev_priv->drm; 4275 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4276 struct drm_atomic_state *state; 4277 int ret; 4278 4279 /* reset doesn't touch the display */ 4280 if (!i915_modparams.force_reset_modeset_test && 4281 !gpu_reset_clobbers_display(dev_priv)) 4282 return; 4283 4284 /* We have a modeset vs reset deadlock, defensively unbreak it. */ 4285 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4286 smp_mb__after_atomic(); 4287 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); 4288 4289 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { 4290 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n"); 4291 intel_gt_set_wedged(&dev_priv->gt); 4292 } 4293 4294 /* 4295 * Need mode_config.mutex so that we don't 4296 * trample ongoing ->detect() and whatnot. 4297 */ 4298 mutex_lock(&dev->mode_config.mutex); 4299 drm_modeset_acquire_init(ctx, 0); 4300 while (1) { 4301 ret = drm_modeset_lock_all_ctx(dev, ctx); 4302 if (ret != -EDEADLK) 4303 break; 4304 4305 drm_modeset_backoff(ctx); 4306 } 4307 /* 4308 * Disabling the crtcs gracefully seems nicer. Also the 4309 * g33 docs say we should at least disable all the planes. 
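	 *
	 * Duplicate the current state first, so that intel_finish_reset()
	 * can restore exactly what was up before the reset, and only then
	 * disable all the CRTCs.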
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending CRTCs failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}

void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so we need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}

static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = I915_READ(PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA #1605353570: icl
	 * Set the pixel rounding bit to 1 to allow passthrough
	 * of frame buffer pixels unmodified across the pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}

static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set.
In the 4417 * fastboot case, we'll flip, but if we don't update the pipesrc and 4418 * pfit state, we'll end up with a big fb scanned out into the wrong 4419 * sized surface. 4420 */ 4421 4422 I915_WRITE(PIPESRC(crtc->pipe), 4423 ((new_crtc_state->pipe_src_w - 1) << 16) | 4424 (new_crtc_state->pipe_src_h - 1)); 4425 4426 /* on skylake this is done by detaching scalers */ 4427 if (INTEL_GEN(dev_priv) >= 9) { 4428 skl_detach_scalers(new_crtc_state); 4429 4430 if (new_crtc_state->pch_pfit.enabled) 4431 skylake_pfit_enable(new_crtc_state); 4432 } else if (HAS_PCH_SPLIT(dev_priv)) { 4433 if (new_crtc_state->pch_pfit.enabled) 4434 ironlake_pfit_enable(new_crtc_state); 4435 else if (old_crtc_state->pch_pfit.enabled) 4436 ironlake_pfit_disable(old_crtc_state); 4437 } 4438 4439 if (INTEL_GEN(dev_priv) >= 11) 4440 icl_set_pipe_chicken(crtc); 4441 } 4442 4443 static void intel_fdi_normal_train(struct intel_crtc *crtc) 4444 { 4445 struct drm_device *dev = crtc->base.dev; 4446 struct drm_i915_private *dev_priv = to_i915(dev); 4447 enum pipe pipe = crtc->pipe; 4448 i915_reg_t reg; 4449 u32 temp; 4450 4451 /* enable normal train */ 4452 reg = FDI_TX_CTL(pipe); 4453 temp = I915_READ(reg); 4454 if (IS_IVYBRIDGE(dev_priv)) { 4455 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4456 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 4457 } else { 4458 temp &= ~FDI_LINK_TRAIN_NONE; 4459 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 4460 } 4461 I915_WRITE(reg, temp); 4462 4463 reg = FDI_RX_CTL(pipe); 4464 temp = I915_READ(reg); 4465 if (HAS_PCH_CPT(dev_priv)) { 4466 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4467 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 4468 } else { 4469 temp &= ~FDI_LINK_TRAIN_NONE; 4470 temp |= FDI_LINK_TRAIN_NONE; 4471 } 4472 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 4473 4474 /* wait one idle pattern time */ 4475 POSTING_READ(reg); 4476 udelay(1000); 4477 4478 /* IVB wants error correction enabled */ 4479 if (IS_IVYBRIDGE(dev_priv)) 4480 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 4481 FDI_FE_ERRC_ENABLE); 4482 } 4483 4484 /* The FDI link training functions for ILK/Ibexpeak. 
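 *
 * The sequence is: put both ends of the link into training pattern 1
 * and poll FDI_RX_IIR for bit lock, then switch to pattern 2 and poll
 * for symbol lock. Each poll below is bounded at five reads before we
 * give up and log an error.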
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");
}

static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint.
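 *
 * Same two-phase pattern 1 / pattern 2 scheme as ILK above, but each
 * phase additionally walks the four voltage-swing / pre-emphasis
 * levels in snb_b_fdi_train_param[] until the receiver reports a lock.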
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
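
/*
 * Rough shape of the retry loop shared by the SNB trainer above and the
 * IVB manual trainer below (illustrative pseudo-code only, not meant to
 * compile):
 *
 *	for each vswing/pre-emphasis level:
 *		program the level and enable TX/RX with pattern 1
 *		poll FDI_RX_IIR for bit lock (bounded number of reads)
 *		switch to pattern 2 and poll for symbol lock
 *		locked -> done, otherwise disable and try the next level
 */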

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j / 2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI
train done.\n"); 4836 } 4837 4838 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) 4839 { 4840 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 4841 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 4842 enum pipe pipe = intel_crtc->pipe; 4843 i915_reg_t reg; 4844 u32 temp; 4845 4846 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 4847 reg = FDI_RX_CTL(pipe); 4848 temp = I915_READ(reg); 4849 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 4850 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4851 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4852 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 4853 4854 POSTING_READ(reg); 4855 udelay(200); 4856 4857 /* Switch from Rawclk to PCDclk */ 4858 temp = I915_READ(reg); 4859 I915_WRITE(reg, temp | FDI_PCDCLK); 4860 4861 POSTING_READ(reg); 4862 udelay(200); 4863 4864 /* Enable CPU FDI TX PLL, always on for Ironlake */ 4865 reg = FDI_TX_CTL(pipe); 4866 temp = I915_READ(reg); 4867 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 4868 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 4869 4870 POSTING_READ(reg); 4871 udelay(100); 4872 } 4873 } 4874 4875 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 4876 { 4877 struct drm_device *dev = intel_crtc->base.dev; 4878 struct drm_i915_private *dev_priv = to_i915(dev); 4879 enum pipe pipe = intel_crtc->pipe; 4880 i915_reg_t reg; 4881 u32 temp; 4882 4883 /* Switch from PCDclk to Rawclk */ 4884 reg = FDI_RX_CTL(pipe); 4885 temp = I915_READ(reg); 4886 I915_WRITE(reg, temp & ~FDI_PCDCLK); 4887 4888 /* Disable CPU FDI TX PLL */ 4889 reg = FDI_TX_CTL(pipe); 4890 temp = I915_READ(reg); 4891 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 4892 4893 POSTING_READ(reg); 4894 udelay(100); 4895 4896 reg = FDI_RX_CTL(pipe); 4897 temp = I915_READ(reg); 4898 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 4899 4900 /* Wait for the clocks to turn off. 
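	 * There is no status bit to poll for this, so the posting read
	 * below just flushes the disable before a fixed delay.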
	 */
	POSTING_READ(reg);
	udelay(100);
}

static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		drm_crtc_wait_one_vblank(crtc);

		return true;
	}

	return false;
}

void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in kHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to kHz here for higher
	 * precision.
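	 *
	 * Worked example (illustrative numbers only): for a 108000 kHz
	 * clock with auxdiv = 0, desired_divisor =
	 * DIV_ROUND_CLOSEST(172800000, 108000) = 1600, so
	 * divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0,
	 * which fits the 7-bit divisor and is accepted on the first
	 * auxdiv iteration below.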
5014 */ 5015 for (auxdiv = 0; auxdiv < 2; auxdiv++) { 5016 u32 iclk_virtual_root_freq = 172800 * 1000; 5017 u32 iclk_pi_range = 64; 5018 u32 desired_divisor; 5019 5020 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5021 clock << auxdiv); 5022 divsel = (desired_divisor / iclk_pi_range) - 2; 5023 phaseinc = desired_divisor % iclk_pi_range; 5024 5025 /* 5026 * Near 20MHz is a corner case which is 5027 * out of range for the 7-bit divisor 5028 */ 5029 if (divsel <= 0x7f) 5030 break; 5031 } 5032 5033 /* This should not happen with any sane values */ 5034 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 5035 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 5036 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & 5037 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 5038 5039 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 5040 clock, 5041 auxdiv, 5042 divsel, 5043 phasedir, 5044 phaseinc); 5045 5046 mutex_lock(&dev_priv->sb_lock); 5047 5048 /* Program SSCDIVINTPHASE6 */ 5049 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5050 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 5051 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 5052 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 5053 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 5054 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 5055 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 5056 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 5057 5058 /* Program SSCAUXDIV */ 5059 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5060 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 5061 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 5062 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 5063 5064 /* Enable modulator and associated divider */ 5065 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5066 temp &= ~SBI_SSCCTL_DISABLE; 5067 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 5068 5069 mutex_unlock(&dev_priv->sb_lock); 5070 5071 /* Wait for initialization time */ 5072 udelay(24); 5073 5074 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); 5075 } 5076 5077 int lpt_get_iclkip(struct drm_i915_private *dev_priv) 5078 { 5079 u32 divsel, phaseinc, auxdiv; 5080 u32 iclk_virtual_root_freq = 172800 * 1000; 5081 u32 iclk_pi_range = 64; 5082 u32 desired_divisor; 5083 u32 temp; 5084 5085 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) 5086 return 0; 5087 5088 mutex_lock(&dev_priv->sb_lock); 5089 5090 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5091 if (temp & SBI_SSCCTL_DISABLE) { 5092 mutex_unlock(&dev_priv->sb_lock); 5093 return 0; 5094 } 5095 5096 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5097 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> 5098 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; 5099 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> 5100 SBI_SSCDIVINTPHASE_INCVAL_SHIFT; 5101 5102 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5103 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> 5104 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; 5105 5106 mutex_unlock(&dev_priv->sb_lock); 5107 5108 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; 5109 5110 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5111 desired_divisor << auxdiv); 5112 } 5113 5114 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, 5115 enum pipe pch_transcoder) 5116 { 5117 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5118 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5119 enum transcoder cpu_transcoder = 
crtc_state->cpu_transcoder; 5120 5121 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 5122 I915_READ(HTOTAL(cpu_transcoder))); 5123 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder), 5124 I915_READ(HBLANK(cpu_transcoder))); 5125 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder), 5126 I915_READ(HSYNC(cpu_transcoder))); 5127 5128 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder), 5129 I915_READ(VTOTAL(cpu_transcoder))); 5130 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder), 5131 I915_READ(VBLANK(cpu_transcoder))); 5132 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder), 5133 I915_READ(VSYNC(cpu_transcoder))); 5134 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder), 5135 I915_READ(VSYNCSHIFT(cpu_transcoder))); 5136 } 5137 5138 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) 5139 { 5140 u32 temp; 5141 5142 temp = I915_READ(SOUTH_CHICKEN1); 5143 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 5144 return; 5145 5146 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 5147 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 5148 5149 temp &= ~FDI_BC_BIFURCATION_SELECT; 5150 if (enable) 5151 temp |= FDI_BC_BIFURCATION_SELECT; 5152 5153 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); 5154 I915_WRITE(SOUTH_CHICKEN1, temp); 5155 POSTING_READ(SOUTH_CHICKEN1); 5156 } 5157 5158 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) 5159 { 5160 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5161 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5162 5163 switch (crtc->pipe) { 5164 case PIPE_A: 5165 break; 5166 case PIPE_B: 5167 if (crtc_state->fdi_lanes > 2) 5168 cpt_set_fdi_bc_bifurcation(dev_priv, false); 5169 else 5170 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5171 5172 break; 5173 case PIPE_C: 5174 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5175 5176 break; 5177 default: 5178 BUG(); 5179 } 5180 } 5181 5182 /* 5183 * Finds the encoder associated with the given CRTC. This can only be 5184 * used when we know that the CRTC isn't feeding multiple encoders! 
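 *
 * If that assumption is violated we still return the last matching
 * encoder found, but shout about it with the WARN below, since the
 * result is then essentially arbitrary.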
 */
static struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	int num_encoders = 0;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	WARN(num_encoders != 1, "%d encoders for pipe %c\n",
	     num_encoders, pipe_name(crtc->pipe));

	return encoder;
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already uses the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence.
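	 *
	 * For now the PLL is therefore enabled here, after FDI link
	 * training above but before the transcoder timings are
	 * programmed below.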
*/ 5264 intel_enable_shared_dpll(crtc_state); 5265 5266 /* set transcoder timing, panel must allow it */ 5267 assert_panel_unlocked(dev_priv, pipe); 5268 ironlake_pch_transcoder_set_timings(crtc_state, pipe); 5269 5270 intel_fdi_normal_train(crtc); 5271 5272 /* For PCH DP, enable TRANS_DP_CTL */ 5273 if (HAS_PCH_CPT(dev_priv) && 5274 intel_crtc_has_dp_encoder(crtc_state)) { 5275 const struct drm_display_mode *adjusted_mode = 5276 &crtc_state->base.adjusted_mode; 5277 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 5278 i915_reg_t reg = TRANS_DP_CTL(pipe); 5279 enum port port; 5280 5281 temp = I915_READ(reg); 5282 temp &= ~(TRANS_DP_PORT_SEL_MASK | 5283 TRANS_DP_SYNC_MASK | 5284 TRANS_DP_BPC_MASK); 5285 temp |= TRANS_DP_OUTPUT_ENABLE; 5286 temp |= bpc << 9; /* same format but at 11:9 */ 5287 5288 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 5289 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 5290 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 5291 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 5292 5293 port = intel_get_crtc_new_encoder(state, crtc_state)->port; 5294 WARN_ON(port < PORT_B || port > PORT_D); 5295 temp |= TRANS_DP_PORT_SEL(port); 5296 5297 I915_WRITE(reg, temp); 5298 } 5299 5300 ironlake_enable_pch_transcoder(crtc_state); 5301 } 5302 5303 static void lpt_pch_enable(const struct intel_atomic_state *state, 5304 const struct intel_crtc_state *crtc_state) 5305 { 5306 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5307 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5308 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 5309 5310 assert_pch_transcoder_disabled(dev_priv, PIPE_A); 5311 5312 lpt_program_iclkip(crtc_state); 5313 5314 /* Set transcoder timing. */ 5315 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A); 5316 5317 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 5318 } 5319 5320 static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe) 5321 { 5322 struct drm_i915_private *dev_priv = to_i915(dev); 5323 i915_reg_t dslreg = PIPEDSL(pipe); 5324 u32 temp; 5325 5326 temp = I915_READ(dslreg); 5327 udelay(500); 5328 if (wait_for(I915_READ(dslreg) != temp, 5)) { 5329 if (wait_for(I915_READ(dslreg) != temp, 5)) 5330 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 5331 } 5332 } 5333 5334 /* 5335 * The hardware phase 0.0 refers to the center of the pixel. 5336 * We want to start from the top/left edge which is phase 5337 * -0.5. That matches how the hardware calculates the scaling 5338 * factors (from top-left of the first pixel to bottom-right 5339 * of the last pixel, as opposed to the pixel centers). 5340 * 5341 * For 4:2:0 subsampled chroma planes we obviously have to 5342 * adjust that so that the chroma sample position lands in 5343 * the right spot. 5344 * 5345 * Note that for packed YCbCr 4:2:2 formats there is no way to 5346 * control chroma siting. The hardware simply replicates the 5347 * chroma samples for both of the luma samples, and thus we don't 5348 * actually get the expected MPEG2 chroma siting convention :( 5349 * The same behaviour is observed on pre-SKL platforms as well. 
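 *
 * In the s15.16 fixed point used below (0x10000 == 1.0) the computed
 * phase works out to roughly:
 *   phase = -0.5 + (cosited ? (sub - 1) / (2 * sub) : 0)
 *               + scale / (2 * sub)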
 *
 * Theory behind the formula (note that we ignore sub-pixel
 * source coordinates):
 * s = source sample position
 * d = destination sample position
 *
 * Downscaling 4:1:
 * -0.5
 * | 0.0
 * | |     1.5 (initial phase)
 * | |     |
 * v v     v
 * | s | s | s | s |
 * |       d       |
 *
 * Upscaling 1:4:
 * -0.5
 * | -0.375 (initial phase)
 * | |     0.0
 * | |     |
 * v v     v
 * |       s       |
 * | d | d | d | d |
 */
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
{
	int phase = -0x8000;
	u16 trip = 0;

	if (chroma_cosited)
		phase += (sub - 1) * 0x8000 / sub;

	phase += scale / (2 * sub);

	/*
	 * Hardware initial phase limited to [-0.5:1.5].
	 * Since the max hardware scale factor is 3.0, we
	 * should never actually exceed 1.0 here.
	 */
	WARN_ON(phase < -0x8000 || phase > 0x18000);

	if (phase < 0)
		phase = 0x10000 + phase;
	else
		trip = PS_PHASE_TRIP;

	return ((phase >> 2) & PS_PHASE_MASK) | trip;
}

#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16

static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * If the plane is being disabled, or the scaler is no longer
	 * required, or we are asked to force detach:
	 * - free the scaler bound to this plane/crtc
	 * - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to another user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
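	 *
	 * In other words, only the bookkeeping (scaler_users and the
	 * per-scaler in_use flag) changes at check time; the registers
	 * themselves are cleared later, e.g. by skl_detach_scalers().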
5457 */ 5458 if (force_detach || !need_scaler) { 5459 if (*scaler_id >= 0) { 5460 scaler_state->scaler_users &= ~(1 << scaler_user); 5461 scaler_state->scalers[*scaler_id].in_use = 0; 5462 5463 DRM_DEBUG_KMS("scaler_user index %u.%u: " 5464 "Staged freeing scaler id %d scaler_users = 0x%x\n", 5465 intel_crtc->pipe, scaler_user, *scaler_id, 5466 scaler_state->scaler_users); 5467 *scaler_id = -1; 5468 } 5469 return 0; 5470 } 5471 5472 if (format && drm_format_info_is_yuv_semiplanar(format) && 5473 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { 5474 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); 5475 return -EINVAL; 5476 } 5477 5478 /* range checks */ 5479 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 5480 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 5481 (INTEL_GEN(dev_priv) >= 11 && 5482 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || 5483 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || 5484 (INTEL_GEN(dev_priv) < 11 && 5485 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 5486 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { 5487 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " 5488 "size is out of scaler range\n", 5489 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); 5490 return -EINVAL; 5491 } 5492 5493 /* mark this plane as a scaler user in crtc_state */ 5494 scaler_state->scaler_users |= (1 << scaler_user); 5495 DRM_DEBUG_KMS("scaler_user index %u.%u: " 5496 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", 5497 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, 5498 scaler_state->scaler_users); 5499 5500 return 0; 5501 } 5502 5503 /** 5504 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc. 5505 * 5506 * @state: crtc's scaler state 5507 * 5508 * Return 5509 * 0 - scaler_usage updated successfully 5510 * error - requested scaling cannot be supported or other error condition 5511 */ 5512 int skl_update_scaler_crtc(struct intel_crtc_state *state) 5513 { 5514 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 5515 bool need_scaler = false; 5516 5517 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 5518 need_scaler = true; 5519 5520 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 5521 &state->scaler_state.scaler_id, 5522 state->pipe_src_w, state->pipe_src_h, 5523 adjusted_mode->crtc_hdisplay, 5524 adjusted_mode->crtc_vdisplay, NULL, need_scaler); 5525 } 5526 5527 /** 5528 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 5529 * @crtc_state: crtc's scaler state 5530 * @plane_state: atomic plane state to update 5531 * 5532 * Return 5533 * 0 - scaler_usage updated successfully 5534 * error - requested scaling cannot be supported or other error condition 5535 */ 5536 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 5537 struct intel_plane_state *plane_state) 5538 { 5539 struct intel_plane *intel_plane = 5540 to_intel_plane(plane_state->base.plane); 5541 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 5542 struct drm_framebuffer *fb = plane_state->base.fb; 5543 int ret; 5544 bool force_detach = !fb || !plane_state->base.visible; 5545 bool need_scaler = false; 5546 5547 /* Pre-gen11 and SDR planes always need a scaler for planar formats. 
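	 * Gen11+ HDR planes can fetch the planar formats directly, so
	 * for them a scaler is only claimed when actual scaling is
	 * requested.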
*/ 5548 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && 5549 fb && drm_format_info_is_yuv_semiplanar(fb->format)) 5550 need_scaler = true; 5551 5552 ret = skl_update_scaler(crtc_state, force_detach, 5553 drm_plane_index(&intel_plane->base), 5554 &plane_state->scaler_id, 5555 drm_rect_width(&plane_state->base.src) >> 16, 5556 drm_rect_height(&plane_state->base.src) >> 16, 5557 drm_rect_width(&plane_state->base.dst), 5558 drm_rect_height(&plane_state->base.dst), 5559 fb ? fb->format : NULL, need_scaler); 5560 5561 if (ret || plane_state->scaler_id < 0) 5562 return ret; 5563 5564 /* check colorkey */ 5565 if (plane_state->ckey.flags) { 5566 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", 5567 intel_plane->base.base.id, 5568 intel_plane->base.name); 5569 return -EINVAL; 5570 } 5571 5572 /* Check src format */ 5573 switch (fb->format->format) { 5574 case DRM_FORMAT_RGB565: 5575 case DRM_FORMAT_XBGR8888: 5576 case DRM_FORMAT_XRGB8888: 5577 case DRM_FORMAT_ABGR8888: 5578 case DRM_FORMAT_ARGB8888: 5579 case DRM_FORMAT_XRGB2101010: 5580 case DRM_FORMAT_XBGR2101010: 5581 case DRM_FORMAT_XBGR16161616F: 5582 case DRM_FORMAT_ABGR16161616F: 5583 case DRM_FORMAT_XRGB16161616F: 5584 case DRM_FORMAT_ARGB16161616F: 5585 case DRM_FORMAT_YUYV: 5586 case DRM_FORMAT_YVYU: 5587 case DRM_FORMAT_UYVY: 5588 case DRM_FORMAT_VYUY: 5589 case DRM_FORMAT_NV12: 5590 case DRM_FORMAT_P010: 5591 case DRM_FORMAT_P012: 5592 case DRM_FORMAT_P016: 5593 case DRM_FORMAT_Y210: 5594 case DRM_FORMAT_Y212: 5595 case DRM_FORMAT_Y216: 5596 case DRM_FORMAT_XVYU2101010: 5597 case DRM_FORMAT_XVYU12_16161616: 5598 case DRM_FORMAT_XVYU16161616: 5599 break; 5600 default: 5601 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", 5602 intel_plane->base.base.id, intel_plane->base.name, 5603 fb->base.id, fb->format->format); 5604 return -EINVAL; 5605 } 5606 5607 return 0; 5608 } 5609 5610 static void skylake_scaler_disable(struct intel_crtc *crtc) 5611 { 5612 int i; 5613 5614 for (i = 0; i < crtc->num_scalers; i++) 5615 skl_detach_scaler(crtc, i); 5616 } 5617 5618 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) 5619 { 5620 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 5621 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5622 enum pipe pipe = crtc->pipe; 5623 const struct intel_crtc_scaler_state *scaler_state = 5624 &crtc_state->scaler_state; 5625 5626 if (crtc_state->pch_pfit.enabled) { 5627 u16 uv_rgb_hphase, uv_rgb_vphase; 5628 int pfit_w, pfit_h, hscale, vscale; 5629 int id; 5630 5631 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0)) 5632 return; 5633 5634 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF; 5635 pfit_h = crtc_state->pch_pfit.size & 0xFFFF; 5636 5637 hscale = (crtc_state->pipe_src_w << 16) / pfit_w; 5638 vscale = (crtc_state->pipe_src_h << 16) / pfit_h; 5639 5640 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); 5641 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); 5642 5643 id = scaler_state->scaler_id; 5644 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 5645 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 5646 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id), 5647 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); 5648 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), 5649 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); 5650 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos); 5651 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size); 5652 } 5653 } 5654 5655 static void ironlake_pfit_enable(const struct 
intel_crtc_state *crtc_state)
5656 {
5657 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5658 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5659 enum pipe pipe = crtc->pipe;
5660
5661 if (crtc_state->pch_pfit.enabled) {
5662 /* Force use of hard-coded filter coefficients
5663 * as some pre-programmed values are broken,
5664 * e.g. x201.
5665 */
5666 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5667 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5668 PF_PIPE_SEL_IVB(pipe));
5669 else
5670 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5671 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5672 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5673 }
5674 }
5675
5676 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
5677 {
5678 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5679 struct drm_device *dev = crtc->base.dev;
5680 struct drm_i915_private *dev_priv = to_i915(dev);
5681
5682 if (!crtc_state->ips_enabled)
5683 return;
5684
5685 /*
5686 * We can only enable IPS after we enable a plane and wait for a vblank.
5687 * This function is called from post_plane_update, which is run after
5688 * a vblank wait.
5689 */
5690 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
5691
5692 if (IS_BROADWELL(dev_priv)) {
5693 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5694 IPS_ENABLE | IPS_PCODE_CONTROL));
5695 /* Quoting Art Runyan: "it's not safe to expect any particular
5696 * value in IPS_CTL bit 31 after enabling IPS through the
5697 * mailbox." Moreover, the mailbox may return a bogus state,
5698 * so we need to just enable it and continue on.
5699 */
5700 } else {
5701 I915_WRITE(IPS_CTL, IPS_ENABLE);
5702 /* The bit only becomes 1 in the next vblank, so this wait here
5703 * is essentially intel_wait_for_vblank. If we don't have this
5704 * and don't wait for vblanks until the end of crtc_enable, then
5705 * the HW state readout code will complain that the expected
5706 * IPS_CTL value is not the one we read. */
5707 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
5708 DRM_ERROR("Timed out waiting for IPS enable\n");
5709 }
5710 }
5711
5712 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5713 {
5714 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5715 struct drm_device *dev = crtc->base.dev;
5716 struct drm_i915_private *dev_priv = to_i915(dev);
5717
5718 if (!crtc_state->ips_enabled)
5719 return;
5720
5721 if (IS_BROADWELL(dev_priv)) {
5722 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5723 /*
5724 * Wait for PCODE to finish disabling IPS. The BSpec-specified
5725 * 42ms timeout value leads to occasional timeouts so use 100ms
5726 * instead.
5727 */
5728 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
5729 DRM_ERROR("Timed out waiting for IPS disable\n");
5730 } else {
5731 I915_WRITE(IPS_CTL, 0);
5732 POSTING_READ(IPS_CTL);
5733 }
5734
5735 /* We need to wait for a vblank before we can disable the plane. */
5736 intel_wait_for_vblank(dev_priv, crtc->pipe);
5737 }
5738
5739 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5740 {
5741 if (intel_crtc->overlay)
5742 (void) intel_overlay_switch_off(intel_crtc->overlay);
5743
5744 /* Let userspace switch the overlay on again. In most cases userspace
5745 * has to recompute where to put it anyway.
5746 */
5747 }
5748
5749 /**
5750 * intel_post_enable_primary - Perform operations after enabling primary plane
5751 * @crtc: the CRTC whose primary plane was just enabled
5752 * @new_crtc_state: the enabling state
5753 *
5754 * Performs potentially sleeping operations that must be done after the primary
5755 * plane is enabled, such as updating FBC and IPS. Note that this may be
5756 * called due to an explicit primary plane update, or due to an implicit
5757 * re-enable that is caused when a sprite plane is updated to no longer
5758 * completely hide the primary plane.
5759 */
5760 static void
5761 intel_post_enable_primary(struct drm_crtc *crtc,
5762 const struct intel_crtc_state *new_crtc_state)
5763 {
5764 struct drm_device *dev = crtc->dev;
5765 struct drm_i915_private *dev_priv = to_i915(dev);
5766 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5767 enum pipe pipe = intel_crtc->pipe;
5768
5769 /*
5770 * Gen2 reports pipe underruns whenever all planes are disabled.
5771 * So don't enable underrun reporting before at least some planes
5772 * are enabled.
5773 * FIXME: Need to fix the logic to work when we turn off all planes
5774 * but leave the pipe running.
5775 */
5776 if (IS_GEN(dev_priv, 2))
5777 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5778
5779 /* Underruns don't always raise interrupts, so check manually. */
5780 intel_check_cpu_fifo_underruns(dev_priv);
5781 intel_check_pch_fifo_underruns(dev_priv);
5782 }
5783
5784 /* FIXME get rid of this and use pre_plane_update */
5785 static void
5786 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5787 {
5788 struct drm_device *dev = crtc->dev;
5789 struct drm_i915_private *dev_priv = to_i915(dev);
5790 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5791 enum pipe pipe = intel_crtc->pipe;
5792
5793 /*
5794 * Gen2 reports pipe underruns whenever all planes are disabled.
5795 * So disable underrun reporting before all the planes get disabled.
5796 */
5797 if (IS_GEN(dev_priv, 2))
5798 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5799
5800 hsw_disable_ips(to_intel_crtc_state(crtc->state));
5801
5802 /*
5803 * Vblank time updates from the shadow to live plane control register
5804 * are blocked if the memory self-refresh mode is active at that
5805 * moment. So to make sure the plane gets truly disabled, disable
5806 * first the self-refresh mode. The self-refresh enable bit in turn
5807 * will be checked/applied by the HW only at the next frame start
5808 * event which is after the vblank start event, so we need to have a
5809 * wait-for-vblank between disabling the plane and the pipe.
5810 */
5811 if (HAS_GMCH(dev_priv) &&
5812 intel_set_memory_cxsr(dev_priv, false))
5813 intel_wait_for_vblank(dev_priv, pipe);
5814 }
5815
5816 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5817 const struct intel_crtc_state *new_crtc_state)
5818 {
5819 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5820 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5821
5822 if (!old_crtc_state->ips_enabled)
5823 return false;
5824
5825 if (needs_modeset(new_crtc_state))
5826 return true;
5827
5828 /*
5829 * Workaround: Do not read or write the pipe palette/gamma data while
5830 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5831 *
5832 * Disable IPS before we program the LUT.
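 * (The counterpart below, hsw_post_update_enable_ips(), re-enables
 * IPS once the LUT has been programmed.)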
5833 */
5834 if (IS_HASWELL(dev_priv) &&
5835 (new_crtc_state->base.color_mgmt_changed ||
5836 new_crtc_state->update_pipe) &&
5837 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5838 return true;
5839
5840 return !new_crtc_state->ips_enabled;
5841 }
5842
5843 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5844 const struct intel_crtc_state *new_crtc_state)
5845 {
5846 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5847 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5848
5849 if (!new_crtc_state->ips_enabled)
5850 return false;
5851
5852 if (needs_modeset(new_crtc_state))
5853 return true;
5854
5855 /*
5856 * Workaround: Do not read or write the pipe palette/gamma data while
5857 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5858 *
5859 * Re-enable IPS after the LUT has been programmed.
5860 */
5861 if (IS_HASWELL(dev_priv) &&
5862 (new_crtc_state->base.color_mgmt_changed ||
5863 new_crtc_state->update_pipe) &&
5864 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5865 return true;
5866
5867 /*
5868 * We can't read out IPS on broadwell, assume the worst and
5869 * forcibly enable IPS on the first fastset.
5870 */
5871 if (new_crtc_state->update_pipe &&
5872 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5873 return true;
5874
5875 return !old_crtc_state->ips_enabled;
5876 }
5877
5878 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5879 const struct intel_crtc_state *crtc_state)
5880 {
5881 if (!crtc_state->nv12_planes)
5882 return false;
5883
5884 /* WA Display #0827: Gen9:all */
5885 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5886 return true;
5887
5888 return false;
5889 }
5890
5891 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5892 const struct intel_crtc_state *crtc_state)
5893 {
5894 /* Wa_2006604312:icl */
5895 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5896 return true;
5897
5898 return false;
5899 }
5900
5901 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5902 {
5903 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5904 struct drm_device *dev = crtc->base.dev;
5905 struct drm_i915_private *dev_priv = to_i915(dev);
5906 struct drm_atomic_state *state = old_crtc_state->base.state;
5907 struct intel_crtc_state *pipe_config =
5908 intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
5909 crtc);
5910 struct drm_plane *primary = crtc->base.primary;
5911 struct drm_plane_state *old_primary_state =
5912 drm_atomic_get_old_plane_state(state, primary);
5913
5914 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
5915
5916 if (pipe_config->update_wm_post && pipe_config->base.active)
5917 intel_update_watermarks(crtc);
5918
5919 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
5920 hsw_enable_ips(pipe_config);
5921
5922 if (old_primary_state) {
5923 struct drm_plane_state *new_primary_state =
5924 drm_atomic_get_new_plane_state(state, primary);
5925
5926 intel_fbc_post_update(crtc);
5927
5928 if (new_primary_state->visible &&
5929 (needs_modeset(pipe_config) ||
5930 !old_primary_state->visible))
5931 intel_post_enable_primary(&crtc->base, pipe_config);
5932 }
5933
5934 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
5935 !needs_nv12_wa(dev_priv, pipe_config))
5936 skl_wa_827(dev_priv, crtc->pipe, false);
5937
5938 if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5939
!needs_scalerclk_wa(dev_priv, pipe_config)) 5940 icl_wa_scalerclkgating(dev_priv, crtc->pipe, false); 5941 } 5942 5943 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, 5944 struct intel_crtc_state *pipe_config) 5945 { 5946 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 5947 struct drm_device *dev = crtc->base.dev; 5948 struct drm_i915_private *dev_priv = to_i915(dev); 5949 struct drm_atomic_state *state = old_crtc_state->base.state; 5950 struct drm_plane *primary = crtc->base.primary; 5951 struct drm_plane_state *old_primary_state = 5952 drm_atomic_get_old_plane_state(state, primary); 5953 bool modeset = needs_modeset(pipe_config); 5954 struct intel_atomic_state *intel_state = 5955 to_intel_atomic_state(state); 5956 5957 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config)) 5958 hsw_disable_ips(old_crtc_state); 5959 5960 if (old_primary_state) { 5961 struct intel_plane_state *new_primary_state = 5962 intel_atomic_get_new_plane_state(intel_state, 5963 to_intel_plane(primary)); 5964 5965 intel_fbc_pre_update(crtc, pipe_config, new_primary_state); 5966 /* 5967 * Gen2 reports pipe underruns whenever all planes are disabled. 5968 * So disable underrun reporting before all the planes get disabled. 5969 */ 5970 if (IS_GEN(dev_priv, 2) && old_primary_state->visible && 5971 (modeset || !new_primary_state->base.visible)) 5972 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5973 } 5974 5975 /* Display WA 827 */ 5976 if (!needs_nv12_wa(dev_priv, old_crtc_state) && 5977 needs_nv12_wa(dev_priv, pipe_config)) 5978 skl_wa_827(dev_priv, crtc->pipe, true); 5979 5980 /* Wa_2006604312:icl */ 5981 if (!needs_scalerclk_wa(dev_priv, old_crtc_state) && 5982 needs_scalerclk_wa(dev_priv, pipe_config)) 5983 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true); 5984 5985 /* 5986 * Vblank time updates from the shadow to live plane control register 5987 * are blocked if the memory self-refresh mode is active at that 5988 * moment. So to make sure the plane gets truly disabled, disable 5989 * first the self-refresh mode. The self-refresh enable bit in turn 5990 * will be checked/applied by the HW only at the next frame start 5991 * event which is after the vblank start event, so we need to have a 5992 * wait-for-vblank between disabling the plane and the pipe. 5993 */ 5994 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active && 5995 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 5996 intel_wait_for_vblank(dev_priv, crtc->pipe); 5997 5998 /* 5999 * IVB workaround: must disable low power watermarks for at least 6000 * one frame before enabling scaling. LP watermarks can be re-enabled 6001 * when scaling is disabled. 6002 * 6003 * WaCxSRDisabledForSpriteScaling:ivb 6004 */ 6005 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) && 6006 old_crtc_state->base.active) 6007 intel_wait_for_vblank(dev_priv, crtc->pipe); 6008 6009 /* 6010 * If we're doing a modeset, we're done. No need to do any pre-vblank 6011 * watermark programming here. 6012 */ 6013 if (needs_modeset(pipe_config)) 6014 return; 6015 6016 /* 6017 * For platforms that support atomic watermarks, program the 6018 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 6019 * will be the intermediate values that are safe for both pre- and 6020 * post- vblank; when vblank happens, the 'active' values will be set 6021 * to the final 'target' values and we'll do this again to get the 6022 * optimal watermarks. 
For gen9+ platforms, the values we program here 6023 * will be the final target values which will get automatically latched 6024 * at vblank time; no further programming will be necessary. 6025 * 6026 * If a platform hasn't been transitioned to atomic watermarks yet, 6027 * we'll continue to update watermarks the old way, if flags tell 6028 * us to. 6029 */ 6030 if (dev_priv->display.initial_watermarks != NULL) 6031 dev_priv->display.initial_watermarks(intel_state, 6032 pipe_config); 6033 else if (pipe_config->update_wm_pre) 6034 intel_update_watermarks(crtc); 6035 } 6036 6037 static void intel_crtc_disable_planes(struct intel_atomic_state *state, 6038 struct intel_crtc *crtc) 6039 { 6040 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6041 const struct intel_crtc_state *new_crtc_state = 6042 intel_atomic_get_new_crtc_state(state, crtc); 6043 unsigned int update_mask = new_crtc_state->update_planes; 6044 const struct intel_plane_state *old_plane_state; 6045 struct intel_plane *plane; 6046 unsigned fb_bits = 0; 6047 int i; 6048 6049 intel_crtc_dpms_overlay_disable(crtc); 6050 6051 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 6052 if (crtc->pipe != plane->pipe || 6053 !(update_mask & BIT(plane->id))) 6054 continue; 6055 6056 intel_disable_plane(plane, new_crtc_state); 6057 6058 if (old_plane_state->base.visible) 6059 fb_bits |= plane->frontbuffer_bit; 6060 } 6061 6062 intel_frontbuffer_flip(dev_priv, fb_bits); 6063 } 6064 6065 /* 6066 * intel_connector_primary_encoder - get the primary encoder for a connector 6067 * @connector: connector for which to return the encoder 6068 * 6069 * Returns the primary encoder for a connector. There is a 1:1 mapping from 6070 * all connectors to their encoder, except for DP-MST connectors which have 6071 * both a virtual and a primary encoder. These DP-MST primary encoders can be 6072 * pointed to by as many DP-MST connectors as there are pipes. 6073 */ 6074 static struct intel_encoder * 6075 intel_connector_primary_encoder(struct intel_connector *connector) 6076 { 6077 struct intel_encoder *encoder; 6078 6079 if (connector->mst_port) 6080 return &dp_to_dig_port(connector->mst_port)->base; 6081 6082 encoder = intel_attached_encoder(&connector->base); 6083 WARN_ON(!encoder); 6084 6085 return encoder; 6086 } 6087 6088 static bool 6089 intel_connector_needs_modeset(struct intel_atomic_state *state, 6090 const struct drm_connector_state *old_conn_state, 6091 const struct drm_connector_state *new_conn_state) 6092 { 6093 struct intel_crtc *old_crtc = old_conn_state->crtc ? 6094 to_intel_crtc(old_conn_state->crtc) : NULL; 6095 struct intel_crtc *new_crtc = new_conn_state->crtc ? 
6096 to_intel_crtc(new_conn_state->crtc) : NULL; 6097 6098 return new_crtc != old_crtc || 6099 (new_crtc && 6100 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc))); 6101 } 6102 6103 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 6104 { 6105 struct drm_connector_state *old_conn_state; 6106 struct drm_connector_state *new_conn_state; 6107 struct drm_connector *conn; 6108 int i; 6109 6110 for_each_oldnew_connector_in_state(&state->base, conn, 6111 old_conn_state, new_conn_state, i) { 6112 struct intel_encoder *encoder; 6113 struct intel_crtc *crtc; 6114 6115 if (!intel_connector_needs_modeset(state, 6116 old_conn_state, 6117 new_conn_state)) 6118 continue; 6119 6120 encoder = intel_connector_primary_encoder(to_intel_connector(conn)); 6121 if (!encoder->update_prepare) 6122 continue; 6123 6124 crtc = new_conn_state->crtc ? 6125 to_intel_crtc(new_conn_state->crtc) : NULL; 6126 encoder->update_prepare(state, encoder, crtc); 6127 } 6128 } 6129 6130 static void intel_encoders_update_complete(struct intel_atomic_state *state) 6131 { 6132 struct drm_connector_state *old_conn_state; 6133 struct drm_connector_state *new_conn_state; 6134 struct drm_connector *conn; 6135 int i; 6136 6137 for_each_oldnew_connector_in_state(&state->base, conn, 6138 old_conn_state, new_conn_state, i) { 6139 struct intel_encoder *encoder; 6140 struct intel_crtc *crtc; 6141 6142 if (!intel_connector_needs_modeset(state, 6143 old_conn_state, 6144 new_conn_state)) 6145 continue; 6146 6147 encoder = intel_connector_primary_encoder(to_intel_connector(conn)); 6148 if (!encoder->update_complete) 6149 continue; 6150 6151 crtc = new_conn_state->crtc ? 6152 to_intel_crtc(new_conn_state->crtc) : NULL; 6153 encoder->update_complete(state, encoder, crtc); 6154 } 6155 } 6156 6157 static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc, 6158 struct intel_crtc_state *crtc_state, 6159 struct intel_atomic_state *state) 6160 { 6161 struct drm_connector_state *conn_state; 6162 struct drm_connector *conn; 6163 int i; 6164 6165 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6166 struct intel_encoder *encoder = 6167 to_intel_encoder(conn_state->best_encoder); 6168 6169 if (conn_state->crtc != &crtc->base) 6170 continue; 6171 6172 if (encoder->pre_pll_enable) 6173 encoder->pre_pll_enable(encoder, crtc_state, conn_state); 6174 } 6175 } 6176 6177 static void intel_encoders_pre_enable(struct intel_crtc *crtc, 6178 struct intel_crtc_state *crtc_state, 6179 struct intel_atomic_state *state) 6180 { 6181 struct drm_connector_state *conn_state; 6182 struct drm_connector *conn; 6183 int i; 6184 6185 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6186 struct intel_encoder *encoder = 6187 to_intel_encoder(conn_state->best_encoder); 6188 6189 if (conn_state->crtc != &crtc->base) 6190 continue; 6191 6192 if (encoder->pre_enable) 6193 encoder->pre_enable(encoder, crtc_state, conn_state); 6194 } 6195 } 6196 6197 static void intel_encoders_enable(struct intel_crtc *crtc, 6198 struct intel_crtc_state *crtc_state, 6199 struct intel_atomic_state *state) 6200 { 6201 struct drm_connector_state *conn_state; 6202 struct drm_connector *conn; 6203 int i; 6204 6205 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6206 struct intel_encoder *encoder = 6207 to_intel_encoder(conn_state->best_encoder); 6208 6209 if (conn_state->crtc != &crtc->base) 6210 continue; 6211 6212 if (encoder->enable) 6213 encoder->enable(encoder, crtc_state, conn_state); 6214 
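/* Let the ACPI OpRegion know that this encoder is now active. */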
intel_opregion_notify_encoder(encoder, true);
6215 }
6216 }
6217
6218 static void intel_encoders_disable(struct intel_crtc *crtc,
6219 struct intel_crtc_state *old_crtc_state,
6220 struct intel_atomic_state *state)
6221 {
6222 struct drm_connector_state *old_conn_state;
6223 struct drm_connector *conn;
6224 int i;
6225
6226 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6227 struct intel_encoder *encoder =
6228 to_intel_encoder(old_conn_state->best_encoder);
6229
6230 if (old_conn_state->crtc != &crtc->base)
6231 continue;
6232
6233 intel_opregion_notify_encoder(encoder, false);
6234 if (encoder->disable)
6235 encoder->disable(encoder, old_crtc_state, old_conn_state);
6236 }
6237 }
6238
6239 static void intel_encoders_post_disable(struct intel_crtc *crtc,
6240 struct intel_crtc_state *old_crtc_state,
6241 struct intel_atomic_state *state)
6242 {
6243 struct drm_connector_state *old_conn_state;
6244 struct drm_connector *conn;
6245 int i;
6246
6247 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6248 struct intel_encoder *encoder =
6249 to_intel_encoder(old_conn_state->best_encoder);
6250
6251 if (old_conn_state->crtc != &crtc->base)
6252 continue;
6253
6254 if (encoder->post_disable)
6255 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6256 }
6257 }
6258
6259 static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
6260 struct intel_crtc_state *old_crtc_state,
6261 struct intel_atomic_state *state)
6262 {
6263 struct drm_connector_state *old_conn_state;
6264 struct drm_connector *conn;
6265 int i;
6266
6267 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6268 struct intel_encoder *encoder =
6269 to_intel_encoder(old_conn_state->best_encoder);
6270
6271 if (old_conn_state->crtc != &crtc->base)
6272 continue;
6273
6274 if (encoder->post_pll_disable)
6275 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6276 }
6277 }
6278
6279 static void intel_encoders_update_pipe(struct intel_crtc *crtc,
6280 struct intel_crtc_state *crtc_state,
6281 struct intel_atomic_state *state)
6282 {
6283 struct drm_connector_state *conn_state;
6284 struct drm_connector *conn;
6285 int i;
6286
6287 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6288 struct intel_encoder *encoder =
6289 to_intel_encoder(conn_state->best_encoder);
6290
6291 if (conn_state->crtc != &crtc->base)
6292 continue;
6293
6294 if (encoder->update_pipe)
6295 encoder->update_pipe(encoder, crtc_state, conn_state);
6296 }
6297 }
6298
6299 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6300 {
6301 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6302 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6303
6304 plane->disable_plane(plane, crtc_state);
6305 }
6306
6307 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
6308 struct intel_atomic_state *state)
6309 {
6310 struct drm_crtc *crtc = pipe_config->base.crtc;
6311 struct drm_device *dev = crtc->dev;
6312 struct drm_i915_private *dev_priv = to_i915(dev);
6313 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6314 enum pipe pipe = intel_crtc->pipe;
6315
6316 if (WARN_ON(intel_crtc->active))
6317 return;
6318
6319 /*
6320 * Sometimes spurious CPU pipe underruns happen during FDI
6321 * training, at least with VGA+HDMI cloning. Suppress them.
6322 *
6323 * On ILK we get occasional spurious CPU pipe underruns
6324 * between eDP port A enable and vdd enable.
Also PCH port 6325 * enable seems to result in the occasional CPU pipe underrun. 6326 * 6327 * Spurious PCH underruns also occur during PCH enabling. 6328 */ 6329 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6330 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6331 6332 if (pipe_config->has_pch_encoder) 6333 intel_prepare_shared_dpll(pipe_config); 6334 6335 if (intel_crtc_has_dp_encoder(pipe_config)) 6336 intel_dp_set_m_n(pipe_config, M1_N1); 6337 6338 intel_set_pipe_timings(pipe_config); 6339 intel_set_pipe_src_size(pipe_config); 6340 6341 if (pipe_config->has_pch_encoder) { 6342 intel_cpu_transcoder_set_m_n(pipe_config, 6343 &pipe_config->fdi_m_n, NULL); 6344 } 6345 6346 ironlake_set_pipeconf(pipe_config); 6347 6348 intel_crtc->active = true; 6349 6350 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6351 6352 if (pipe_config->has_pch_encoder) { 6353 /* Note: FDI PLL enabling _must_ be done before we enable the 6354 * cpu pipes, hence this is separate from all the other fdi/pch 6355 * enabling. */ 6356 ironlake_fdi_pll_enable(pipe_config); 6357 } else { 6358 assert_fdi_tx_disabled(dev_priv, pipe); 6359 assert_fdi_rx_disabled(dev_priv, pipe); 6360 } 6361 6362 ironlake_pfit_enable(pipe_config); 6363 6364 /* 6365 * On ILK+ LUT must be loaded before the pipe is running but with 6366 * clocks enabled 6367 */ 6368 intel_color_load_luts(pipe_config); 6369 intel_color_commit(pipe_config); 6370 /* update DSPCNTR to configure gamma for pipe bottom color */ 6371 intel_disable_primary_plane(pipe_config); 6372 6373 if (dev_priv->display.initial_watermarks != NULL) 6374 dev_priv->display.initial_watermarks(state, pipe_config); 6375 intel_enable_pipe(pipe_config); 6376 6377 if (pipe_config->has_pch_encoder) 6378 ironlake_pch_enable(state, pipe_config); 6379 6380 assert_vblank_disabled(crtc); 6381 intel_crtc_vblank_on(pipe_config); 6382 6383 intel_encoders_enable(intel_crtc, pipe_config, state); 6384 6385 if (HAS_PCH_CPT(dev_priv)) 6386 cpt_verify_modeset(dev, intel_crtc->pipe); 6387 6388 /* 6389 * Must wait for vblank to avoid spurious PCH FIFO underruns. 6390 * And a second vblank wait is needed at least on ILK with 6391 * some interlaced HDMI modes. Let's do the double wait always 6392 * in case there are more corner cases we don't know about. 6393 */ 6394 if (pipe_config->has_pch_encoder) { 6395 intel_wait_for_vblank(dev_priv, pipe); 6396 intel_wait_for_vblank(dev_priv, pipe); 6397 } 6398 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6399 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6400 } 6401 6402 /* IPS only exists on ULT machines and is tied to pipe A. 
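 * (Concretely: HAS_IPS() is only true for HSW ULT and BDW.)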
*/ 6403 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 6404 { 6405 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; 6406 } 6407 6408 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, 6409 enum pipe pipe, bool apply) 6410 { 6411 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe)); 6412 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 6413 6414 if (apply) 6415 val |= mask; 6416 else 6417 val &= ~mask; 6418 6419 I915_WRITE(CLKGATE_DIS_PSL(pipe), val); 6420 } 6421 6422 static void icl_pipe_mbus_enable(struct intel_crtc *crtc) 6423 { 6424 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6425 enum pipe pipe = crtc->pipe; 6426 u32 val; 6427 6428 val = MBUS_DBOX_A_CREDIT(2); 6429 6430 if (INTEL_GEN(dev_priv) >= 12) { 6431 val |= MBUS_DBOX_BW_CREDIT(2); 6432 val |= MBUS_DBOX_B_CREDIT(12); 6433 } else { 6434 val |= MBUS_DBOX_BW_CREDIT(1); 6435 val |= MBUS_DBOX_B_CREDIT(8); 6436 } 6437 6438 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); 6439 } 6440 6441 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, 6442 struct intel_atomic_state *state) 6443 { 6444 struct drm_crtc *crtc = pipe_config->base.crtc; 6445 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 6446 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6447 enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe; 6448 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 6449 bool psl_clkgate_wa; 6450 6451 if (WARN_ON(intel_crtc->active)) 6452 return; 6453 6454 intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); 6455 6456 if (pipe_config->shared_dpll) 6457 intel_enable_shared_dpll(pipe_config); 6458 6459 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6460 6461 if (intel_crtc_has_dp_encoder(pipe_config)) 6462 intel_dp_set_m_n(pipe_config, M1_N1); 6463 6464 if (!transcoder_is_dsi(cpu_transcoder)) 6465 intel_set_pipe_timings(pipe_config); 6466 6467 intel_set_pipe_src_size(pipe_config); 6468 6469 if (cpu_transcoder != TRANSCODER_EDP && 6470 !transcoder_is_dsi(cpu_transcoder)) { 6471 I915_WRITE(PIPE_MULT(cpu_transcoder), 6472 pipe_config->pixel_multiplier - 1); 6473 } 6474 6475 if (pipe_config->has_pch_encoder) { 6476 intel_cpu_transcoder_set_m_n(pipe_config, 6477 &pipe_config->fdi_m_n, NULL); 6478 } 6479 6480 if (!transcoder_is_dsi(cpu_transcoder)) 6481 haswell_set_pipeconf(pipe_config); 6482 6483 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 6484 bdw_set_pipemisc(pipe_config); 6485 6486 intel_crtc->active = true; 6487 6488 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ 6489 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 6490 pipe_config->pch_pfit.enabled; 6491 if (psl_clkgate_wa) 6492 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 6493 6494 if (INTEL_GEN(dev_priv) >= 9) 6495 skylake_pfit_enable(pipe_config); 6496 else 6497 ironlake_pfit_enable(pipe_config); 6498 6499 /* 6500 * On ILK+ LUT must be loaded before the pipe is running but with 6501 * clocks enabled 6502 */ 6503 intel_color_load_luts(pipe_config); 6504 intel_color_commit(pipe_config); 6505 /* update DSPCNTR to configure gamma/csc for pipe bottom color */ 6506 if (INTEL_GEN(dev_priv) < 9) 6507 intel_disable_primary_plane(pipe_config); 6508 6509 if (INTEL_GEN(dev_priv) >= 11) 6510 icl_set_pipe_chicken(intel_crtc); 6511 6512 intel_ddi_set_pipe_settings(pipe_config); 6513 if (!transcoder_is_dsi(cpu_transcoder)) 6514 intel_ddi_enable_transcoder_func(pipe_config); 6515 6516 if 
(dev_priv->display.initial_watermarks != NULL) 6517 dev_priv->display.initial_watermarks(state, pipe_config); 6518 6519 if (INTEL_GEN(dev_priv) >= 11) 6520 icl_pipe_mbus_enable(intel_crtc); 6521 6522 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 6523 if (!transcoder_is_dsi(cpu_transcoder)) 6524 intel_enable_pipe(pipe_config); 6525 6526 if (pipe_config->has_pch_encoder) 6527 lpt_pch_enable(state, pipe_config); 6528 6529 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) 6530 intel_ddi_set_vc_payload_alloc(pipe_config, true); 6531 6532 assert_vblank_disabled(crtc); 6533 intel_crtc_vblank_on(pipe_config); 6534 6535 intel_encoders_enable(intel_crtc, pipe_config, state); 6536 6537 if (psl_clkgate_wa) { 6538 intel_wait_for_vblank(dev_priv, pipe); 6539 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); 6540 } 6541 6542 /* If we change the relative order between pipe/planes enabling, we need 6543 * to change the workaround. */ 6544 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; 6545 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 6546 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 6547 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 6548 } 6549 } 6550 6551 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) 6552 { 6553 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 6554 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6555 enum pipe pipe = crtc->pipe; 6556 6557 /* To avoid upsetting the power well on haswell only disable the pfit if 6558 * it's in use. The hw state code will make sure we get this right. */ 6559 if (old_crtc_state->pch_pfit.enabled) { 6560 I915_WRITE(PF_CTL(pipe), 0); 6561 I915_WRITE(PF_WIN_POS(pipe), 0); 6562 I915_WRITE(PF_WIN_SZ(pipe), 0); 6563 } 6564 } 6565 6566 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, 6567 struct intel_atomic_state *state) 6568 { 6569 struct drm_crtc *crtc = old_crtc_state->base.crtc; 6570 struct drm_device *dev = crtc->dev; 6571 struct drm_i915_private *dev_priv = to_i915(dev); 6572 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6573 enum pipe pipe = intel_crtc->pipe; 6574 6575 /* 6576 * Sometimes spurious CPU pipe underruns happen when the 6577 * pipe is already disabled, but FDI RX/TX is still enabled. 6578 * Happens at least with VGA+HDMI cloning. Suppress them. 
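 * (Both reporters are re-enabled at the end of this function, once
 * FDI and the PCH transcoder have been shut down.)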
6579 */ 6580 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6581 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6582 6583 intel_encoders_disable(intel_crtc, old_crtc_state, state); 6584 6585 drm_crtc_vblank_off(crtc); 6586 assert_vblank_disabled(crtc); 6587 6588 intel_disable_pipe(old_crtc_state); 6589 6590 ironlake_pfit_disable(old_crtc_state); 6591 6592 if (old_crtc_state->has_pch_encoder) 6593 ironlake_fdi_disable(crtc); 6594 6595 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 6596 6597 if (old_crtc_state->has_pch_encoder) { 6598 ironlake_disable_pch_transcoder(dev_priv, pipe); 6599 6600 if (HAS_PCH_CPT(dev_priv)) { 6601 i915_reg_t reg; 6602 u32 temp; 6603 6604 /* disable TRANS_DP_CTL */ 6605 reg = TRANS_DP_CTL(pipe); 6606 temp = I915_READ(reg); 6607 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 6608 TRANS_DP_PORT_SEL_MASK); 6609 temp |= TRANS_DP_PORT_SEL_NONE; 6610 I915_WRITE(reg, temp); 6611 6612 /* disable DPLL_SEL */ 6613 temp = I915_READ(PCH_DPLL_SEL); 6614 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 6615 I915_WRITE(PCH_DPLL_SEL, temp); 6616 } 6617 6618 ironlake_fdi_pll_disable(intel_crtc); 6619 } 6620 6621 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6622 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6623 } 6624 6625 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, 6626 struct intel_atomic_state *state) 6627 { 6628 struct drm_crtc *crtc = old_crtc_state->base.crtc; 6629 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 6630 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6631 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 6632 6633 intel_encoders_disable(intel_crtc, old_crtc_state, state); 6634 6635 drm_crtc_vblank_off(crtc); 6636 assert_vblank_disabled(crtc); 6637 6638 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 6639 if (!transcoder_is_dsi(cpu_transcoder)) 6640 intel_disable_pipe(old_crtc_state); 6641 6642 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) 6643 intel_ddi_set_vc_payload_alloc(old_crtc_state, false); 6644 6645 if (!transcoder_is_dsi(cpu_transcoder)) 6646 intel_ddi_disable_transcoder_func(old_crtc_state); 6647 6648 intel_dsc_disable(old_crtc_state); 6649 6650 if (INTEL_GEN(dev_priv) >= 9) 6651 skylake_scaler_disable(intel_crtc); 6652 else 6653 ironlake_pfit_disable(old_crtc_state); 6654 6655 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 6656 6657 intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); 6658 } 6659 6660 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 6661 { 6662 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6663 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6664 6665 if (!crtc_state->gmch_pfit.control) 6666 return; 6667 6668 /* 6669 * The panel fitter should only be adjusted whilst the pipe is disabled, 6670 * according to register description and PRM. 6671 */ 6672 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 6673 assert_pipe_disabled(dev_priv, crtc->pipe); 6674 6675 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios); 6676 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control); 6677 6678 /* Border color in case we don't scale up to the full screen. Black by 6679 * default, change to something else for debugging. 
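 * (A non-zero value here shows up as the border color around the
 * scaled image, which makes panel fitter issues easy to spot.)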
*/ 6680 I915_WRITE(BCLRPAT(crtc->pipe), 0); 6681 } 6682 6683 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 6684 { 6685 if (phy == PHY_NONE) 6686 return false; 6687 6688 if (IS_ELKHARTLAKE(dev_priv)) 6689 return phy <= PHY_C; 6690 6691 if (INTEL_GEN(dev_priv) >= 11) 6692 return phy <= PHY_B; 6693 6694 return false; 6695 } 6696 6697 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 6698 { 6699 if (INTEL_GEN(dev_priv) >= 12) 6700 return phy >= PHY_D && phy <= PHY_I; 6701 6702 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) 6703 return phy >= PHY_C && phy <= PHY_F; 6704 6705 return false; 6706 } 6707 6708 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 6709 { 6710 if (IS_ELKHARTLAKE(i915) && port == PORT_D) 6711 return PHY_A; 6712 6713 return (enum phy)port; 6714 } 6715 6716 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 6717 { 6718 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 6719 return PORT_TC_NONE; 6720 6721 if (INTEL_GEN(dev_priv) >= 12) 6722 return port - PORT_D; 6723 6724 return port - PORT_C; 6725 } 6726 6727 enum intel_display_power_domain intel_port_to_power_domain(enum port port) 6728 { 6729 switch (port) { 6730 case PORT_A: 6731 return POWER_DOMAIN_PORT_DDI_A_LANES; 6732 case PORT_B: 6733 return POWER_DOMAIN_PORT_DDI_B_LANES; 6734 case PORT_C: 6735 return POWER_DOMAIN_PORT_DDI_C_LANES; 6736 case PORT_D: 6737 return POWER_DOMAIN_PORT_DDI_D_LANES; 6738 case PORT_E: 6739 return POWER_DOMAIN_PORT_DDI_E_LANES; 6740 case PORT_F: 6741 return POWER_DOMAIN_PORT_DDI_F_LANES; 6742 default: 6743 MISSING_CASE(port); 6744 return POWER_DOMAIN_PORT_OTHER; 6745 } 6746 } 6747 6748 enum intel_display_power_domain 6749 intel_aux_power_domain(struct intel_digital_port *dig_port) 6750 { 6751 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 6752 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 6753 6754 if (intel_phy_is_tc(dev_priv, phy) && 6755 dig_port->tc_mode == TC_PORT_TBT_ALT) { 6756 switch (dig_port->aux_ch) { 6757 case AUX_CH_C: 6758 return POWER_DOMAIN_AUX_C_TBT; 6759 case AUX_CH_D: 6760 return POWER_DOMAIN_AUX_D_TBT; 6761 case AUX_CH_E: 6762 return POWER_DOMAIN_AUX_E_TBT; 6763 case AUX_CH_F: 6764 return POWER_DOMAIN_AUX_F_TBT; 6765 default: 6766 MISSING_CASE(dig_port->aux_ch); 6767 return POWER_DOMAIN_AUX_C_TBT; 6768 } 6769 } 6770 6771 switch (dig_port->aux_ch) { 6772 case AUX_CH_A: 6773 return POWER_DOMAIN_AUX_A; 6774 case AUX_CH_B: 6775 return POWER_DOMAIN_AUX_B; 6776 case AUX_CH_C: 6777 return POWER_DOMAIN_AUX_C; 6778 case AUX_CH_D: 6779 return POWER_DOMAIN_AUX_D; 6780 case AUX_CH_E: 6781 return POWER_DOMAIN_AUX_E; 6782 case AUX_CH_F: 6783 return POWER_DOMAIN_AUX_F; 6784 default: 6785 MISSING_CASE(dig_port->aux_ch); 6786 return POWER_DOMAIN_AUX_A; 6787 } 6788 } 6789 6790 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) 6791 { 6792 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6793 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6794 struct drm_encoder *encoder; 6795 enum pipe pipe = crtc->pipe; 6796 u64 mask; 6797 enum transcoder transcoder = crtc_state->cpu_transcoder; 6798 6799 if (!crtc_state->base.active) 6800 return 0; 6801 6802 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe)); 6803 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder)); 6804 if (crtc_state->pch_pfit.enabled || 6805 crtc_state->pch_pfit.force_thru) 6806 mask |= 
BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 6807 6808 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 6809 crtc_state->base.encoder_mask) { 6810 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 6811 6812 mask |= BIT_ULL(intel_encoder->power_domain); 6813 } 6814 6815 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 6816 mask |= BIT_ULL(POWER_DOMAIN_AUDIO); 6817 6818 if (crtc_state->shared_dpll) 6819 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE); 6820 6821 return mask; 6822 } 6823 6824 static u64 6825 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) 6826 { 6827 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6828 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6829 enum intel_display_power_domain domain; 6830 u64 domains, new_domains, old_domains; 6831 6832 old_domains = crtc->enabled_power_domains; 6833 crtc->enabled_power_domains = new_domains = 6834 get_crtc_power_domains(crtc_state); 6835 6836 domains = new_domains & ~old_domains; 6837 6838 for_each_power_domain(domain, domains) 6839 intel_display_power_get(dev_priv, domain); 6840 6841 return old_domains & ~new_domains; 6842 } 6843 6844 static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 6845 u64 domains) 6846 { 6847 enum intel_display_power_domain domain; 6848 6849 for_each_power_domain(domain, domains) 6850 intel_display_power_put_unchecked(dev_priv, domain); 6851 } 6852 6853 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, 6854 struct intel_atomic_state *state) 6855 { 6856 struct drm_crtc *crtc = pipe_config->base.crtc; 6857 struct drm_device *dev = crtc->dev; 6858 struct drm_i915_private *dev_priv = to_i915(dev); 6859 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6860 enum pipe pipe = intel_crtc->pipe; 6861 6862 if (WARN_ON(intel_crtc->active)) 6863 return; 6864 6865 if (intel_crtc_has_dp_encoder(pipe_config)) 6866 intel_dp_set_m_n(pipe_config, M1_N1); 6867 6868 intel_set_pipe_timings(pipe_config); 6869 intel_set_pipe_src_size(pipe_config); 6870 6871 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 6872 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 6873 I915_WRITE(CHV_CANVAS(pipe), 0); 6874 } 6875 6876 i9xx_set_pipeconf(pipe_config); 6877 6878 intel_crtc->active = true; 6879 6880 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6881 6882 intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); 6883 6884 if (IS_CHERRYVIEW(dev_priv)) { 6885 chv_prepare_pll(intel_crtc, pipe_config); 6886 chv_enable_pll(intel_crtc, pipe_config); 6887 } else { 6888 vlv_prepare_pll(intel_crtc, pipe_config); 6889 vlv_enable_pll(intel_crtc, pipe_config); 6890 } 6891 6892 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6893 6894 i9xx_pfit_enable(pipe_config); 6895 6896 intel_color_load_luts(pipe_config); 6897 intel_color_commit(pipe_config); 6898 /* update DSPCNTR to configure gamma for pipe bottom color */ 6899 intel_disable_primary_plane(pipe_config); 6900 6901 dev_priv->display.initial_watermarks(state, pipe_config); 6902 intel_enable_pipe(pipe_config); 6903 6904 assert_vblank_disabled(crtc); 6905 intel_crtc_vblank_on(pipe_config); 6906 6907 intel_encoders_enable(intel_crtc, pipe_config, state); 6908 } 6909 6910 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) 6911 { 6912 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 6913 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6914 6915 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0); 6916 
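/* FP1 holds the divisors for the reduced/downclocked frequency,
 * e.g. LVDS downclocking; FP0 covers normal operation. */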
I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1); 6917 } 6918 6919 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, 6920 struct intel_atomic_state *state) 6921 { 6922 struct drm_crtc *crtc = pipe_config->base.crtc; 6923 struct drm_device *dev = crtc->dev; 6924 struct drm_i915_private *dev_priv = to_i915(dev); 6925 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6926 enum pipe pipe = intel_crtc->pipe; 6927 6928 if (WARN_ON(intel_crtc->active)) 6929 return; 6930 6931 i9xx_set_pll_dividers(pipe_config); 6932 6933 if (intel_crtc_has_dp_encoder(pipe_config)) 6934 intel_dp_set_m_n(pipe_config, M1_N1); 6935 6936 intel_set_pipe_timings(pipe_config); 6937 intel_set_pipe_src_size(pipe_config); 6938 6939 i9xx_set_pipeconf(pipe_config); 6940 6941 intel_crtc->active = true; 6942 6943 if (!IS_GEN(dev_priv, 2)) 6944 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6945 6946 intel_encoders_pre_enable(intel_crtc, pipe_config, state); 6947 6948 i9xx_enable_pll(intel_crtc, pipe_config); 6949 6950 i9xx_pfit_enable(pipe_config); 6951 6952 intel_color_load_luts(pipe_config); 6953 intel_color_commit(pipe_config); 6954 /* update DSPCNTR to configure gamma for pipe bottom color */ 6955 intel_disable_primary_plane(pipe_config); 6956 6957 if (dev_priv->display.initial_watermarks != NULL) 6958 dev_priv->display.initial_watermarks(state, 6959 pipe_config); 6960 else 6961 intel_update_watermarks(intel_crtc); 6962 intel_enable_pipe(pipe_config); 6963 6964 assert_vblank_disabled(crtc); 6965 intel_crtc_vblank_on(pipe_config); 6966 6967 intel_encoders_enable(intel_crtc, pipe_config, state); 6968 } 6969 6970 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 6971 { 6972 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 6973 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6974 6975 if (!old_crtc_state->gmch_pfit.control) 6976 return; 6977 6978 assert_pipe_disabled(dev_priv, crtc->pipe); 6979 6980 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n", 6981 I915_READ(PFIT_CONTROL)); 6982 I915_WRITE(PFIT_CONTROL, 0); 6983 } 6984 6985 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, 6986 struct intel_atomic_state *state) 6987 { 6988 struct drm_crtc *crtc = old_crtc_state->base.crtc; 6989 struct drm_device *dev = crtc->dev; 6990 struct drm_i915_private *dev_priv = to_i915(dev); 6991 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6992 enum pipe pipe = intel_crtc->pipe; 6993 6994 /* 6995 * On gen2 planes are double buffered but the pipe isn't, so we must 6996 * wait for planes to fully turn off before disabling the pipe. 
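 * (The plane disable only latches at the next vblank, while the pipe
 * stops immediately, so without this wait the plane could outlive
 * its pipe.)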
6997 */ 6998 if (IS_GEN(dev_priv, 2)) 6999 intel_wait_for_vblank(dev_priv, pipe); 7000 7001 intel_encoders_disable(intel_crtc, old_crtc_state, state); 7002 7003 drm_crtc_vblank_off(crtc); 7004 assert_vblank_disabled(crtc); 7005 7006 intel_disable_pipe(old_crtc_state); 7007 7008 i9xx_pfit_disable(old_crtc_state); 7009 7010 intel_encoders_post_disable(intel_crtc, old_crtc_state, state); 7011 7012 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 7013 if (IS_CHERRYVIEW(dev_priv)) 7014 chv_disable_pll(dev_priv, pipe); 7015 else if (IS_VALLEYVIEW(dev_priv)) 7016 vlv_disable_pll(dev_priv, pipe); 7017 else 7018 i9xx_disable_pll(old_crtc_state); 7019 } 7020 7021 intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); 7022 7023 if (!IS_GEN(dev_priv, 2)) 7024 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 7025 7026 if (!dev_priv->display.initial_watermarks) 7027 intel_update_watermarks(intel_crtc); 7028 7029 /* clock the pipe down to 640x480@60 to potentially save power */ 7030 if (IS_I830(dev_priv)) 7031 i830_enable_pipe(dev_priv, pipe); 7032 } 7033 7034 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, 7035 struct drm_modeset_acquire_ctx *ctx) 7036 { 7037 struct intel_encoder *encoder; 7038 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7039 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 7040 struct intel_bw_state *bw_state = 7041 to_intel_bw_state(dev_priv->bw_obj.state); 7042 enum intel_display_power_domain domain; 7043 struct intel_plane *plane; 7044 u64 domains; 7045 struct drm_atomic_state *state; 7046 struct intel_crtc_state *crtc_state; 7047 int ret; 7048 7049 if (!intel_crtc->active) 7050 return; 7051 7052 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) { 7053 const struct intel_plane_state *plane_state = 7054 to_intel_plane_state(plane->base.state); 7055 7056 if (plane_state->base.visible) 7057 intel_plane_disable_noatomic(intel_crtc, plane); 7058 } 7059 7060 state = drm_atomic_state_alloc(crtc->dev); 7061 if (!state) { 7062 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", 7063 crtc->base.id, crtc->name); 7064 return; 7065 } 7066 7067 state->acquire_ctx = ctx; 7068 7069 /* Everything's already locked, -EDEADLK can't happen. 
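 * (The caller passes in a drm_modeset_acquire_ctx via ctx that is
 * already holding every lock we could possibly need here.)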
*/
7070 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
7071 ret = drm_atomic_add_affected_connectors(state, crtc);
7072
7073 WARN_ON(IS_ERR(crtc_state) || ret);
7074
7075 dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));
7076
7077 drm_atomic_state_put(state);
7078
7079 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
7080 crtc->base.id, crtc->name);
7081
7082 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
7083 crtc->state->active = false;
7084 intel_crtc->active = false;
7085 crtc->enabled = false;
7086 crtc->state->connector_mask = 0;
7087 crtc->state->encoder_mask = 0;
7088
7089 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
7090 encoder->base.crtc = NULL;
7091
7092 intel_fbc_disable(intel_crtc);
7093 intel_update_watermarks(intel_crtc);
7094 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
7095
7096 domains = intel_crtc->enabled_power_domains;
7097 for_each_power_domain(domain, domains)
7098 intel_display_power_put_unchecked(dev_priv, domain);
7099 intel_crtc->enabled_power_domains = 0;
7100
7101 dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
7102 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
7103 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
7104
7105 bw_state->data_rate[intel_crtc->pipe] = 0;
7106 bw_state->num_active_planes[intel_crtc->pipe] = 0;
7107 }
7108
7109 /*
7110 * Turn all CRTCs off, but do not adjust state.
7111 * This has to be paired with a call to intel_modeset_setup_hw_state.
7112 */
7113 int intel_display_suspend(struct drm_device *dev)
7114 {
7115 struct drm_i915_private *dev_priv = to_i915(dev);
7116 struct drm_atomic_state *state;
7117 int ret;
7118
7119 state = drm_atomic_helper_suspend(dev);
7120 ret = PTR_ERR_OR_ZERO(state);
7121 if (ret)
7122 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
7123 else
7124 dev_priv->modeset_restore_state = state;
7125 return ret;
7126 }
7127
7128 void intel_encoder_destroy(struct drm_encoder *encoder)
7129 {
7130 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7131
7132 drm_encoder_cleanup(encoder);
7133 kfree(intel_encoder);
7134 }
7135
7136 /* Cross check the actual hw state with our own modeset state tracking (and its
7137 * internal consistency).
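 * Both directions are verified: an enabled connector must be attached
 * to an active crtc and to the encoder the atomic state points at,
 * while a disabled connector must not sit on an active crtc.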
*/ 7138 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, 7139 struct drm_connector_state *conn_state) 7140 { 7141 struct intel_connector *connector = to_intel_connector(conn_state->connector); 7142 7143 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 7144 connector->base.base.id, 7145 connector->base.name); 7146 7147 if (connector->get_hw_state(connector)) { 7148 struct intel_encoder *encoder = connector->encoder; 7149 7150 I915_STATE_WARN(!crtc_state, 7151 "connector enabled without attached crtc\n"); 7152 7153 if (!crtc_state) 7154 return; 7155 7156 I915_STATE_WARN(!crtc_state->base.active, 7157 "connector is active, but attached crtc isn't\n"); 7158 7159 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 7160 return; 7161 7162 I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 7163 "atomic encoder doesn't match attached encoder\n"); 7164 7165 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 7166 "attached encoder crtc differs from connector crtc\n"); 7167 } else { 7168 I915_STATE_WARN(crtc_state && crtc_state->base.active, 7169 "attached crtc is active, but connector isn't\n"); 7170 I915_STATE_WARN(!crtc_state && conn_state->best_encoder, 7171 "best encoder set without crtc!\n"); 7172 } 7173 } 7174 7175 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 7176 { 7177 if (crtc_state->base.enable && crtc_state->has_pch_encoder) 7178 return crtc_state->fdi_lanes; 7179 7180 return 0; 7181 } 7182 7183 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, 7184 struct intel_crtc_state *pipe_config) 7185 { 7186 struct drm_i915_private *dev_priv = to_i915(dev); 7187 struct drm_atomic_state *state = pipe_config->base.state; 7188 struct intel_crtc *other_crtc; 7189 struct intel_crtc_state *other_crtc_state; 7190 7191 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", 7192 pipe_name(pipe), pipe_config->fdi_lanes); 7193 if (pipe_config->fdi_lanes > 4) { 7194 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", 7195 pipe_name(pipe), pipe_config->fdi_lanes); 7196 return -EINVAL; 7197 } 7198 7199 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 7200 if (pipe_config->fdi_lanes > 2) { 7201 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 7202 pipe_config->fdi_lanes); 7203 return -EINVAL; 7204 } else { 7205 return 0; 7206 } 7207 } 7208 7209 if (INTEL_NUM_PIPES(dev_priv) == 2) 7210 return 0; 7211 7212 /* Ivybridge 3 pipe is really complicated */ 7213 switch (pipe) { 7214 case PIPE_A: 7215 return 0; 7216 case PIPE_B: 7217 if (pipe_config->fdi_lanes <= 2) 7218 return 0; 7219 7220 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); 7221 other_crtc_state = 7222 intel_atomic_get_crtc_state(state, other_crtc); 7223 if (IS_ERR(other_crtc_state)) 7224 return PTR_ERR(other_crtc_state); 7225 7226 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 7227 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", 7228 pipe_name(pipe), pipe_config->fdi_lanes); 7229 return -EINVAL; 7230 } 7231 return 0; 7232 case PIPE_C: 7233 if (pipe_config->fdi_lanes > 2) { 7234 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 7235 pipe_name(pipe), pipe_config->fdi_lanes); 7236 return -EINVAL; 7237 } 7238 7239 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); 7240 other_crtc_state = 7241 intel_atomic_get_crtc_state(state, other_crtc); 7242 if (IS_ERR(other_crtc_state)) 7243 return PTR_ERR(other_crtc_state); 7244 7245 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 7246 
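/* Pipes B and C share FDI bandwidth on IVB; pipe C can only be
 * fed when pipe B uses no more than two lanes. */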
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 7247 return -EINVAL; 7248 } 7249 return 0; 7250 default: 7251 BUG(); 7252 } 7253 } 7254 7255 #define RETRY 1 7256 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, 7257 struct intel_crtc_state *pipe_config) 7258 { 7259 struct drm_device *dev = intel_crtc->base.dev; 7260 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 7261 int lane, link_bw, fdi_dotclock, ret; 7262 bool needs_recompute = false; 7263 7264 retry: 7265 /* FDI is a binary signal running at ~2.7GHz, encoding 7266 * each output octet as 10 bits. The actual frequency 7267 * is stored as a divider into a 100MHz clock, and the 7268 * mode pixel clock is stored in units of 1KHz. 7269 * Hence the bw of each lane in terms of the mode signal 7270 * is: 7271 */ 7272 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config); 7273 7274 fdi_dotclock = adjusted_mode->crtc_clock; 7275 7276 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 7277 pipe_config->pipe_bpp); 7278 7279 pipe_config->fdi_lanes = lane; 7280 7281 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 7282 link_bw, &pipe_config->fdi_m_n, false, false); 7283 7284 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 7285 if (ret == -EDEADLK) 7286 return ret; 7287 7288 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 7289 pipe_config->pipe_bpp -= 2*3; 7290 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 7291 pipe_config->pipe_bpp); 7292 needs_recompute = true; 7293 pipe_config->bw_constrained = true; 7294 7295 goto retry; 7296 } 7297 7298 if (needs_recompute) 7299 return RETRY; 7300 7301 return ret; 7302 } 7303 7304 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) 7305 { 7306 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7307 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7308 7309 /* IPS only exists on ULT machines and is tied to pipe A. */ 7310 if (!hsw_crtc_supports_ips(crtc)) 7311 return false; 7312 7313 if (!i915_modparams.enable_ips) 7314 return false; 7315 7316 if (crtc_state->pipe_bpp > 24) 7317 return false; 7318 7319 /* 7320 * We compare against max which means we must take 7321 * the increased cdclk requirement into account when 7322 * calculating the new cdclk. 7323 * 7324 * Should measure whether using a lower cdclk w/o IPS 7325 */ 7326 if (IS_BROADWELL(dev_priv) && 7327 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100) 7328 return false; 7329 7330 return true; 7331 } 7332 7333 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state) 7334 { 7335 struct drm_i915_private *dev_priv = 7336 to_i915(crtc_state->base.crtc->dev); 7337 struct intel_atomic_state *intel_state = 7338 to_intel_atomic_state(crtc_state->base.state); 7339 7340 if (!hsw_crtc_state_ips_capable(crtc_state)) 7341 return false; 7342 7343 /* 7344 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 7345 * enabled and disabled dynamically based on package C states, 7346 * user space can't make reliable use of the CRCs, so let's just 7347 * completely disable it. 7348 */ 7349 if (crtc_state->crc_enabled) 7350 return false; 7351 7352 /* IPS should be fine as long as at least one plane is enabled. 
*/ 7353 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) 7354 return false; 7355 7356 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 7357 if (IS_BROADWELL(dev_priv) && 7358 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100) 7359 return false; 7360 7361 return true; 7362 } 7363 7364 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 7365 { 7366 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7367 7368 /* GDG double wide on either pipe, otherwise pipe A only */ 7369 return INTEL_GEN(dev_priv) < 4 && 7370 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 7371 } 7372 7373 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) 7374 { 7375 u32 pixel_rate; 7376 7377 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock; 7378 7379 /* 7380 * We only use IF-ID interlacing. If we ever use 7381 * PF-ID we'll need to adjust the pixel_rate here. 7382 */ 7383 7384 if (pipe_config->pch_pfit.enabled) { 7385 u64 pipe_w, pipe_h, pfit_w, pfit_h; 7386 u32 pfit_size = pipe_config->pch_pfit.size; 7387 7388 pipe_w = pipe_config->pipe_src_w; 7389 pipe_h = pipe_config->pipe_src_h; 7390 7391 pfit_w = (pfit_size >> 16) & 0xFFFF; 7392 pfit_h = pfit_size & 0xFFFF; 7393 if (pipe_w < pfit_w) 7394 pipe_w = pfit_w; 7395 if (pipe_h < pfit_h) 7396 pipe_h = pfit_h; 7397 7398 if (WARN_ON(!pfit_w || !pfit_h)) 7399 return pixel_rate; 7400 7401 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h), 7402 pfit_w * pfit_h); 7403 } 7404 7405 return pixel_rate; 7406 } 7407 7408 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 7409 { 7410 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 7411 7412 if (HAS_GMCH(dev_priv)) 7413 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 7414 crtc_state->pixel_rate = 7415 crtc_state->base.adjusted_mode.crtc_clock; 7416 else 7417 crtc_state->pixel_rate = 7418 ilk_pipe_pixel_rate(crtc_state); 7419 } 7420 7421 static int intel_crtc_compute_config(struct intel_crtc *crtc, 7422 struct intel_crtc_state *pipe_config) 7423 { 7424 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7425 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 7426 int clock_limit = dev_priv->max_dotclk_freq; 7427 7428 if (INTEL_GEN(dev_priv) < 4) { 7429 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 7430 7431 /* 7432 * Enable double wide mode when the dot clock 7433 * is > 90% of the (display) core speed. 7434 */ 7435 if (intel_crtc_supports_double_wide(crtc) && 7436 adjusted_mode->crtc_clock > clock_limit) { 7437 clock_limit = dev_priv->max_dotclk_freq; 7438 pipe_config->double_wide = true; 7439 } 7440 } 7441 7442 if (adjusted_mode->crtc_clock > clock_limit) { 7443 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 7444 adjusted_mode->crtc_clock, clock_limit, 7445 yesno(pipe_config->double_wide)); 7446 return -EINVAL; 7447 } 7448 7449 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 7450 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) && 7451 pipe_config->base.ctm) { 7452 /* 7453 * There is only one pipe CSC unit per pipe, and we need that 7454 * for output conversion from RGB->YCBCR. So if CTM is already 7455 * applied we can't support YCBCR420 output. 
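 * E.g. a userspace CTM loaded for panel color calibration combined with
 * YCBCR444 output would need the single CSC unit twice, hence the
 * rejection below.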
7456 */ 7457 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n"); 7458 return -EINVAL; 7459 } 7460 7461 /* 7462 * Pipe horizontal size must be even in: 7463 * - DVO ganged mode 7464 * - LVDS dual channel mode 7465 * - Double wide pipe 7466 */ 7467 if (pipe_config->pipe_src_w & 1) { 7468 if (pipe_config->double_wide) { 7469 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n"); 7470 return -EINVAL; 7471 } 7472 7473 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 7474 intel_is_dual_link_lvds(dev_priv)) { 7475 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n"); 7476 return -EINVAL; 7477 } 7478 } 7479 7480 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 7481 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 7482 */ 7483 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && 7484 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 7485 return -EINVAL; 7486 7487 intel_crtc_compute_pixel_rate(pipe_config); 7488 7489 if (pipe_config->has_pch_encoder) 7490 return ironlake_fdi_compute_config(crtc, pipe_config); 7491 7492 return 0; 7493 } 7494 7495 static void 7496 intel_reduce_m_n_ratio(u32 *num, u32 *den) 7497 { 7498 while (*num > DATA_LINK_M_N_MASK || 7499 *den > DATA_LINK_M_N_MASK) { 7500 *num >>= 1; 7501 *den >>= 1; 7502 } 7503 } 7504 7505 static void compute_m_n(unsigned int m, unsigned int n, 7506 u32 *ret_m, u32 *ret_n, 7507 bool constant_n) 7508 { 7509 /* 7510 * Several DP dongles in particular seem to be fussy about 7511 * too large link M/N values. Give N value as 0x8000 that 7512 * should be acceptable by specific devices. 0x8000 is the 7513 * specified fixed N value for asynchronous clock mode, 7514 * which the devices expect also in synchronous clock mode. 7515 */ 7516 if (constant_n) 7517 *ret_n = 0x8000; 7518 else 7519 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 7520 7521 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 7522 intel_reduce_m_n_ratio(ret_m, ret_n); 7523 } 7524 7525 void 7526 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 7527 int pixel_clock, int link_clock, 7528 struct intel_link_m_n *m_n, 7529 bool constant_n, bool fec_enable) 7530 { 7531 u32 data_clock = bits_per_pixel * pixel_clock; 7532 7533 if (fec_enable) 7534 data_clock = intel_dp_mode_to_fec_clock(data_clock); 7535 7536 m_n->tu = 64; 7537 compute_m_n(data_clock, 7538 link_clock * nlanes * 8, 7539 &m_n->gmch_m, &m_n->gmch_n, 7540 constant_n); 7541 7542 compute_m_n(pixel_clock, link_clock, 7543 &m_n->link_m, &m_n->link_n, 7544 constant_n); 7545 } 7546 7547 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) 7548 { 7549 /* 7550 * There may be no VBT; and if the BIOS enabled SSC we can 7551 * just keep using it to avoid unnecessary flicker. Whereas if the 7552 * BIOS isn't using it, don't assume it will work even if the VBT 7553 * indicates as much. 
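 * In short: BIOS SSC on -> keep using SSC; BIOS SSC off -> clear the
 * VBT flag as well. The IBX/CPT branch below implements exactly that
 * by comparing DREF_SSC1_ENABLE against vbt.lvds_use_ssc.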
*/ 7555 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 7556 bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) & 7557 DREF_SSC1_ENABLE; 7558 7559 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 7560 DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n", 7561 enableddisabled(bios_lvds_use_ssc), 7562 enableddisabled(dev_priv->vbt.lvds_use_ssc)); 7563 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 7564 } 7565 } 7566 } 7567 7568 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 7569 { 7570 if (i915_modparams.panel_use_ssc >= 0) 7571 return i915_modparams.panel_use_ssc != 0; 7572 return dev_priv->vbt.lvds_use_ssc 7573 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 7574 } 7575 7576 static u32 pnv_dpll_compute_fp(struct dpll *dpll) 7577 { 7578 return (1 << dpll->n) << 16 | dpll->m2; 7579 } 7580 7581 static u32 i9xx_dpll_compute_fp(struct dpll *dpll) 7582 { 7583 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 7584 } 7585 7586 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7587 struct intel_crtc_state *crtc_state, 7588 struct dpll *reduced_clock) 7589 { 7590 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7591 u32 fp, fp2 = 0; 7592 7593 if (IS_PINEVIEW(dev_priv)) { 7594 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 7595 if (reduced_clock) 7596 fp2 = pnv_dpll_compute_fp(reduced_clock); 7597 } else { 7598 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 7599 if (reduced_clock) 7600 fp2 = i9xx_dpll_compute_fp(reduced_clock); 7601 } 7602 7603 crtc_state->dpll_hw_state.fp0 = fp; 7604 7605 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 7606 reduced_clock) { 7607 crtc_state->dpll_hw_state.fp1 = fp2; 7608 } else { 7609 crtc_state->dpll_hw_state.fp1 = fp; 7610 } 7611 } 7612 7613 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe 7614 pipe) 7615 { 7616 u32 reg_val; 7617 7618 /* 7619 * The PLLB opamp always calibrates to the max value of 0x3f; force-enable 7620 * it and set it to a reasonable value instead.
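 * Concretely, the writes below toggle the low byte of PLL DW9 between
 * 0x30 and 0 while stepping the top byte of REF DW13 from 0x8c to
 * 0xb0; the magic values presumably come from the same vbios notes
 * referenced in vlv_prepare_pll().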
7621 */ 7622 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7623 reg_val &= 0xffffff00; 7624 reg_val |= 0x00000030; 7625 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7626 7627 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7628 reg_val &= 0x00ffffff; 7629 reg_val |= 0x8c000000; 7630 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7631 7632 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7633 reg_val &= 0xffffff00; 7634 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7635 7636 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7637 reg_val &= 0x00ffffff; 7638 reg_val |= 0xb0000000; 7639 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7640 } 7641 7642 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 7643 const struct intel_link_m_n *m_n) 7644 { 7645 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7646 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7647 enum pipe pipe = crtc->pipe; 7648 7649 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7650 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 7651 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 7652 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 7653 } 7654 7655 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 7656 enum transcoder transcoder) 7657 { 7658 if (IS_HASWELL(dev_priv)) 7659 return transcoder == TRANSCODER_EDP; 7660 7661 /* 7662 * Strictly speaking some registers are available before 7663 * gen7, but we only support DRRS on gen7+ 7664 */ 7665 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv); 7666 } 7667 7668 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 7669 const struct intel_link_m_n *m_n, 7670 const struct intel_link_m_n *m2_n2) 7671 { 7672 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 7673 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7674 enum pipe pipe = crtc->pipe; 7675 enum transcoder transcoder = crtc_state->cpu_transcoder; 7676 7677 if (INTEL_GEN(dev_priv) >= 5) { 7678 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 7679 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 7680 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 7681 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 7682 /* 7683 * M2_N2 registers are set only if DRRS is supported 7684 * (to make sure the registers are not unnecessarily accessed). 7685 */ 7686 if (m2_n2 && crtc_state->has_drrs && 7687 transcoder_has_m2_n2(dev_priv, transcoder)) { 7688 I915_WRITE(PIPE_DATA_M2(transcoder), 7689 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 7690 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 7691 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 7692 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 7693 } 7694 } else { 7695 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7696 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 7697 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 7698 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 7699 } 7700 } 7701 7702 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) 7703 { 7704 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 7705 7706 if (m_n == M1_N1) { 7707 dp_m_n = &crtc_state->dp_m_n; 7708 dp_m2_n2 = &crtc_state->dp_m2_n2; 7709 } else if (m_n == M2_N2) { 7710 7711 /* 7712 * M2_N2 registers are not supported. Hence m2_n2 divider value 7713 * needs to be programmed into M1_N1. 
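 * (i.e. on platforms where transcoder_has_m2_n2() is false, a refresh
 * rate switch - assumed here to be the DRRS path - lands in this
 * branch and rewrites M1_N1 with the precomputed m2_n2 values)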
7714 */ 7715 dp_m_n = &crtc_state->dp_m2_n2; 7716 } else { 7717 DRM_ERROR("Unsupported divider value\n"); 7718 return; 7719 } 7720 7721 if (crtc_state->has_pch_encoder) 7722 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n); 7723 else 7724 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); 7725 } 7726 7727 static void vlv_compute_dpll(struct intel_crtc *crtc, 7728 struct intel_crtc_state *pipe_config) 7729 { 7730 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 7731 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7732 if (crtc->pipe != PIPE_A) 7733 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7734 7735 /* DPLL not used with DSI, but still need the rest set up */ 7736 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7737 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 7738 DPLL_EXT_BUFFER_ENABLE_VLV; 7739 7740 pipe_config->dpll_hw_state.dpll_md = 7741 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7742 } 7743 7744 static void chv_compute_dpll(struct intel_crtc *crtc, 7745 struct intel_crtc_state *pipe_config) 7746 { 7747 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 7748 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7749 if (crtc->pipe != PIPE_A) 7750 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7751 7752 /* DPLL not used with DSI, but still need the rest set up */ 7753 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7754 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 7755 7756 pipe_config->dpll_hw_state.dpll_md = 7757 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7758 } 7759 7760 static void vlv_prepare_pll(struct intel_crtc *crtc, 7761 const struct intel_crtc_state *pipe_config) 7762 { 7763 struct drm_device *dev = crtc->base.dev; 7764 struct drm_i915_private *dev_priv = to_i915(dev); 7765 enum pipe pipe = crtc->pipe; 7766 u32 mdiv; 7767 u32 bestn, bestm1, bestm2, bestp1, bestp2; 7768 u32 coreclk, reg_val; 7769 7770 /* Enable Refclk */ 7771 I915_WRITE(DPLL(pipe), 7772 pipe_config->dpll_hw_state.dpll & 7773 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 7774 7775 /* No need to actually set up the DPLL with DSI */ 7776 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7777 return; 7778 7779 vlv_dpio_get(dev_priv); 7780 7781 bestn = pipe_config->dpll.n; 7782 bestm1 = pipe_config->dpll.m1; 7783 bestm2 = pipe_config->dpll.m2; 7784 bestp1 = pipe_config->dpll.p1; 7785 bestp2 = pipe_config->dpll.p2; 7786 7787 /* See eDP HDMI DPIO driver vbios notes doc */ 7788 7789 /* PLL B needs special handling */ 7790 if (pipe == PIPE_B) 7791 vlv_pllb_recal_opamp(dev_priv, pipe); 7792 7793 /* Set up Tx target for periodic Rcomp update */ 7794 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 7795 7796 /* Disable target IRef on PLL */ 7797 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 7798 reg_val &= 0x00ffffff; 7799 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 7800 7801 /* Disable fast lock */ 7802 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 7803 7804 /* Set idtafcrecal before PLL is enabled */ 7805 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 7806 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 7807 mdiv |= ((bestn << DPIO_N_SHIFT)); 7808 mdiv |= (1 << DPIO_K_SHIFT); 7809 7810 /* 7811 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 7812 * but we don't support that). 7813 * Note: don't use the DAC post divider as it seems unstable. 
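 * Hence DPIO_POST_DIV_HDMIDP is programmed unconditionally below,
 * for VGA as well.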
7814 */ 7815 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 7816 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7817 7818 mdiv |= DPIO_ENABLE_CALIBRATION; 7819 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7820 7821 /* Set HBR and RBR LPF coefficients */ 7822 if (pipe_config->port_clock == 162000 || 7823 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || 7824 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) 7825 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7826 0x009f0003); 7827 else 7828 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7829 0x00d0000f); 7830 7831 if (intel_crtc_has_dp_encoder(pipe_config)) { 7832 /* Use SSC source */ 7833 if (pipe == PIPE_A) 7834 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7835 0x0df40000); 7836 else 7837 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7838 0x0df70000); 7839 } else { /* HDMI or VGA */ 7840 /* Use bend source */ 7841 if (pipe == PIPE_A) 7842 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7843 0x0df70000); 7844 else 7845 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7846 0x0df40000); 7847 } 7848 7849 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 7850 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 7851 if (intel_crtc_has_dp_encoder(pipe_config)) 7852 coreclk |= 0x01000000; 7853 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 7854 7855 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 7856 7857 vlv_dpio_put(dev_priv); 7858 } 7859 7860 static void chv_prepare_pll(struct intel_crtc *crtc, 7861 const struct intel_crtc_state *pipe_config) 7862 { 7863 struct drm_device *dev = crtc->base.dev; 7864 struct drm_i915_private *dev_priv = to_i915(dev); 7865 enum pipe pipe = crtc->pipe; 7866 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7867 u32 loopfilter, tribuf_calcntr; 7868 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 7869 u32 dpio_val; 7870 int vco; 7871 7872 /* Enable Refclk and SSC */ 7873 I915_WRITE(DPLL(pipe), 7874 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 7875 7876 /* No need to actually set up the DPLL with DSI */ 7877 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7878 return; 7879 7880 bestn = pipe_config->dpll.n; 7881 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 7882 bestm1 = pipe_config->dpll.m1; 7883 bestm2 = pipe_config->dpll.m2 >> 22; 7884 bestp1 = pipe_config->dpll.p1; 7885 bestp2 = pipe_config->dpll.p2; 7886 vco = pipe_config->dpll.vco; 7887 dpio_val = 0; 7888 loopfilter = 0; 7889 7890 vlv_dpio_get(dev_priv); 7891 7892 /* p1 and p2 divider */ 7893 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 7894 5 << DPIO_CHV_S1_DIV_SHIFT | 7895 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 7896 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 7897 1 << DPIO_CHV_K_DIV_SHIFT); 7898 7899 /* Feedback post-divider - m2 */ 7900 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 7901 7902 /* Feedback refclk divider - n and m1 */ 7903 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 7904 DPIO_CHV_M1_DIV_BY_2 | 7905 1 << DPIO_CHV_N_DIV_SHIFT); 7906 7907 /* M2 fraction division */ 7908 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 7909 7910 /* M2 fraction division enable */ 7911 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 7912 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 7913 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 7914 if (bestm2_frac) 7915 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 7916 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 7917 7918 /* Program 
digital lock detect threshold */ 7919 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 7920 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 7921 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 7922 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 7923 if (!bestm2_frac) 7924 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 7925 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 7926 7927 /* Loop filter */ 7928 if (vco == 5400000) { 7929 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 7930 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 7931 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 7932 tribuf_calcntr = 0x9; 7933 } else if (vco <= 6200000) { 7934 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 7935 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 7936 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7937 tribuf_calcntr = 0x9; 7938 } else if (vco <= 6480000) { 7939 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7940 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7941 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7942 tribuf_calcntr = 0x8; 7943 } else { 7944 /* Not supported. Apply the same limits as in the max case */ 7945 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7946 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7947 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7948 tribuf_calcntr = 0; 7949 } 7950 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 7951 7952 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 7953 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 7954 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 7955 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 7956 7957 /* AFC Recal */ 7958 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 7959 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 7960 DPIO_AFC_RECAL); 7961 7962 vlv_dpio_put(dev_priv); 7963 } 7964 7965 /** 7966 * vlv_force_pll_on - forcibly enable just the PLL 7967 * @dev_priv: i915 private structure 7968 * @pipe: pipe PLL to enable 7969 * @dpll: PLL configuration 7970 * 7971 * Enable the PLL for @pipe using the supplied @dpll config. To be used 7972 * in cases where we need the PLL enabled even when @pipe is not going to 7973 * be enabled. 7974 */ 7975 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, 7976 const struct dpll *dpll) 7977 { 7978 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 7979 struct intel_crtc_state *pipe_config; 7980 7981 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); 7982 if (!pipe_config) 7983 return -ENOMEM; 7984 7985 pipe_config->base.crtc = &crtc->base; 7986 pipe_config->pixel_multiplier = 1; 7987 pipe_config->dpll = *dpll; 7988 7989 if (IS_CHERRYVIEW(dev_priv)) { 7990 chv_compute_dpll(crtc, pipe_config); 7991 chv_prepare_pll(crtc, pipe_config); 7992 chv_enable_pll(crtc, pipe_config); 7993 } else { 7994 vlv_compute_dpll(crtc, pipe_config); 7995 vlv_prepare_pll(crtc, pipe_config); 7996 vlv_enable_pll(crtc, pipe_config); 7997 } 7998 7999 kfree(pipe_config); 8000 8001 return 0; 8002 } 8003 8004 /** 8005 * vlv_force_pll_off - forcibly disable just the PLL 8006 * @dev_priv: i915 private structure 8007 * @pipe: pipe PLL to disable 8008 * 8009 * Disable the PLL for @pipe. To be used in cases where we need 8010 * the PLL enabled even when @pipe is not going to be enabled. 
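 *
 * Typically pairs with an earlier vlv_force_pll_on(). An illustrative
 * usage sketch (the divider values are hypothetical):
 *
 *	const struct dpll clk = { .n = 1, .m1 = 2, .m2 = 27, .p1 = 3, .p2 = 2 };
 *
 *	vlv_force_pll_on(dev_priv, PIPE_A, &clk);
 *	... poke hardware that needs the PLL running ...
 *	vlv_force_pll_off(dev_priv, PIPE_A);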
8011 */ 8012 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) 8013 { 8014 if (IS_CHERRYVIEW(dev_priv)) 8015 chv_disable_pll(dev_priv, pipe); 8016 else 8017 vlv_disable_pll(dev_priv, pipe); 8018 } 8019 8020 static void i9xx_compute_dpll(struct intel_crtc *crtc, 8021 struct intel_crtc_state *crtc_state, 8022 struct dpll *reduced_clock) 8023 { 8024 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8025 u32 dpll; 8026 struct dpll *clock = &crtc_state->dpll; 8027 8028 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8029 8030 dpll = DPLL_VGA_MODE_DIS; 8031 8032 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 8033 dpll |= DPLLB_MODE_LVDS; 8034 else 8035 dpll |= DPLLB_MODE_DAC_SERIAL; 8036 8037 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 8038 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 8039 dpll |= (crtc_state->pixel_multiplier - 1) 8040 << SDVO_MULTIPLIER_SHIFT_HIRES; 8041 } 8042 8043 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 8044 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 8045 dpll |= DPLL_SDVO_HIGH_SPEED; 8046 8047 if (intel_crtc_has_dp_encoder(crtc_state)) 8048 dpll |= DPLL_SDVO_HIGH_SPEED; 8049 8050 /* compute bitmask from p1 value */ 8051 if (IS_PINEVIEW(dev_priv)) 8052 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 8053 else { 8054 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8055 if (IS_G4X(dev_priv) && reduced_clock) 8056 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8057 } 8058 switch (clock->p2) { 8059 case 5: 8060 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8061 break; 8062 case 7: 8063 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8064 break; 8065 case 10: 8066 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8067 break; 8068 case 14: 8069 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8070 break; 8071 } 8072 if (INTEL_GEN(dev_priv) >= 4) 8073 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 8074 8075 if (crtc_state->sdvo_tv_clock) 8076 dpll |= PLL_REF_INPUT_TVCLKINBC; 8077 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8078 intel_panel_use_ssc(dev_priv)) 8079 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8080 else 8081 dpll |= PLL_REF_INPUT_DREFCLK; 8082 8083 dpll |= DPLL_VCO_ENABLE; 8084 crtc_state->dpll_hw_state.dpll = dpll; 8085 8086 if (INTEL_GEN(dev_priv) >= 4) { 8087 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 8088 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8089 crtc_state->dpll_hw_state.dpll_md = dpll_md; 8090 } 8091 } 8092 8093 static void i8xx_compute_dpll(struct intel_crtc *crtc, 8094 struct intel_crtc_state *crtc_state, 8095 struct dpll *reduced_clock) 8096 { 8097 struct drm_device *dev = crtc->base.dev; 8098 struct drm_i915_private *dev_priv = to_i915(dev); 8099 u32 dpll; 8100 struct dpll *clock = &crtc_state->dpll; 8101 8102 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8103 8104 dpll = DPLL_VGA_MODE_DIS; 8105 8106 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8107 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8108 } else { 8109 if (clock->p1 == 2) 8110 dpll |= PLL_P1_DIVIDE_BY_TWO; 8111 else 8112 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8113 if (clock->p2 == 4) 8114 dpll |= PLL_P2_DIVIDE_BY_4; 8115 } 8116 8117 /* 8118 * Bspec: 8119 * "[Almador Errata}: For the correct operation of the muxed DVO pins 8120 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data, 8121 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock 8122 * Enable) must be set to “1” in both the DPLL A Control 
Register 8123 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)." 8124 * 8125 * For simplicity we simply keep both bits always enabled in 8126 * both DPLLs. The spec says we should disable the DVO 2X clock 8127 * when not needed, but this seems to work fine in practice. 8128 */ 8129 if (IS_I830(dev_priv) || 8130 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) 8131 dpll |= DPLL_DVO_2X_MODE; 8132 8133 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8134 intel_panel_use_ssc(dev_priv)) 8135 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8136 else 8137 dpll |= PLL_REF_INPUT_DREFCLK; 8138 8139 dpll |= DPLL_VCO_ENABLE; 8140 crtc_state->dpll_hw_state.dpll = dpll; 8141 } 8142 8143 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) 8144 { 8145 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8146 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8147 enum pipe pipe = crtc->pipe; 8148 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8149 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; 8150 u32 crtc_vtotal, crtc_vblank_end; 8151 int vsyncshift = 0; 8152 8153 /* We need to be careful not to change the adjusted mode, for otherwise 8154 * the hw state checker will get angry at the mismatch. */ 8155 crtc_vtotal = adjusted_mode->crtc_vtotal; 8156 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 8157 8158 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 8159 /* the chip adds 2 halflines automatically */ 8160 crtc_vtotal -= 1; 8161 crtc_vblank_end -= 1; 8162 8163 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8164 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 8165 else 8166 vsyncshift = adjusted_mode->crtc_hsync_start - 8167 adjusted_mode->crtc_htotal / 2; 8168 if (vsyncshift < 0) 8169 vsyncshift += adjusted_mode->crtc_htotal; 8170 } 8171 8172 if (INTEL_GEN(dev_priv) > 3) 8173 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 8174 8175 I915_WRITE(HTOTAL(cpu_transcoder), 8176 (adjusted_mode->crtc_hdisplay - 1) | 8177 ((adjusted_mode->crtc_htotal - 1) << 16)); 8178 I915_WRITE(HBLANK(cpu_transcoder), 8179 (adjusted_mode->crtc_hblank_start - 1) | 8180 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 8181 I915_WRITE(HSYNC(cpu_transcoder), 8182 (adjusted_mode->crtc_hsync_start - 1) | 8183 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 8184 8185 I915_WRITE(VTOTAL(cpu_transcoder), 8186 (adjusted_mode->crtc_vdisplay - 1) | 8187 ((crtc_vtotal - 1) << 16)); 8188 I915_WRITE(VBLANK(cpu_transcoder), 8189 (adjusted_mode->crtc_vblank_start - 1) | 8190 ((crtc_vblank_end - 1) << 16)); 8191 I915_WRITE(VSYNC(cpu_transcoder), 8192 (adjusted_mode->crtc_vsync_start - 1) | 8193 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 8194 8195 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 8196 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 8197 * documented on the DDI_FUNC_CTL register description, EDP Input Select 8198 * bits.
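 * I.e. when the HSW EDP transcoder drives pipe B or C, the pipe's own
 * VTOTAL register is mirrored below from the EDP transcoder's value.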
*/ 8199 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 8200 (pipe == PIPE_B || pipe == PIPE_C)) 8201 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 8202 8203 } 8204 8205 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 8206 { 8207 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8208 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8209 enum pipe pipe = crtc->pipe; 8210 8211 /* pipesrc controls the size that is scaled from, which should 8212 * always be the user's requested size. 8213 */ 8214 I915_WRITE(PIPESRC(pipe), 8215 ((crtc_state->pipe_src_w - 1) << 16) | 8216 (crtc_state->pipe_src_h - 1)); 8217 } 8218 8219 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 8220 { 8221 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 8222 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8223 8224 if (IS_GEN(dev_priv, 2)) 8225 return false; 8226 8227 if (INTEL_GEN(dev_priv) >= 9 || 8228 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 8229 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; 8230 else 8231 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; 8232 } 8233 8234 static void intel_get_pipe_timings(struct intel_crtc *crtc, 8235 struct intel_crtc_state *pipe_config) 8236 { 8237 struct drm_device *dev = crtc->base.dev; 8238 struct drm_i915_private *dev_priv = to_i915(dev); 8239 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 8240 u32 tmp; 8241 8242 tmp = I915_READ(HTOTAL(cpu_transcoder)); 8243 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 8244 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 8245 8246 if (!transcoder_is_dsi(cpu_transcoder)) { 8247 tmp = I915_READ(HBLANK(cpu_transcoder)); 8248 pipe_config->base.adjusted_mode.crtc_hblank_start = 8249 (tmp & 0xffff) + 1; 8250 pipe_config->base.adjusted_mode.crtc_hblank_end = 8251 ((tmp >> 16) & 0xffff) + 1; 8252 } 8253 tmp = I915_READ(HSYNC(cpu_transcoder)); 8254 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 8255 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 8256 8257 tmp = I915_READ(VTOTAL(cpu_transcoder)); 8258 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 8259 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 8260 8261 if (!transcoder_is_dsi(cpu_transcoder)) { 8262 tmp = I915_READ(VBLANK(cpu_transcoder)); 8263 pipe_config->base.adjusted_mode.crtc_vblank_start = 8264 (tmp & 0xffff) + 1; 8265 pipe_config->base.adjusted_mode.crtc_vblank_end = 8266 ((tmp >> 16) & 0xffff) + 1; 8267 } 8268 tmp = I915_READ(VSYNC(cpu_transcoder)); 8269 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 8270 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 8271 8272 if (intel_pipe_is_interlaced(pipe_config)) { 8273 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 8274 pipe_config->base.adjusted_mode.crtc_vtotal += 1; 8275 pipe_config->base.adjusted_mode.crtc_vblank_end += 1; 8276 } 8277 } 8278 8279 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 8280 struct intel_crtc_state *pipe_config) 8281 { 8282 struct drm_device *dev = crtc->base.dev; 8283 struct drm_i915_private *dev_priv = to_i915(dev); 8284 u32 tmp; 8285 8286 tmp = I915_READ(PIPESRC(crtc->pipe)); 8287 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 8288 pipe_config->pipe_src_w = ((tmp >> 
16) & 0xffff) + 1; 8289 8290 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; 8291 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; 8292 } 8293 8294 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 8295 struct intel_crtc_state *pipe_config) 8296 { 8297 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; 8298 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; 8299 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; 8300 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end; 8301 8302 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; 8303 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; 8304 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; 8305 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; 8306 8307 mode->flags = pipe_config->base.adjusted_mode.flags; 8308 mode->type = DRM_MODE_TYPE_DRIVER; 8309 8310 mode->clock = pipe_config->base.adjusted_mode.crtc_clock; 8311 8312 mode->hsync = drm_mode_hsync(mode); 8313 mode->vrefresh = drm_mode_vrefresh(mode); 8314 drm_mode_set_name(mode); 8315 } 8316 8317 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 8318 { 8319 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8320 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8321 u32 pipeconf; 8322 8323 pipeconf = 0; 8324 8325 /* we keep both pipes enabled on 830 */ 8326 if (IS_I830(dev_priv)) 8327 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; 8328 8329 if (crtc_state->double_wide) 8330 pipeconf |= PIPECONF_DOUBLE_WIDE; 8331 8332 /* only g4x and later have fancy bpc/dither controls */ 8333 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8334 IS_CHERRYVIEW(dev_priv)) { 8335 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 8336 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 8337 pipeconf |= PIPECONF_DITHER_EN | 8338 PIPECONF_DITHER_TYPE_SP; 8339 8340 switch (crtc_state->pipe_bpp) { 8341 case 18: 8342 pipeconf |= PIPECONF_6BPC; 8343 break; 8344 case 24: 8345 pipeconf |= PIPECONF_8BPC; 8346 break; 8347 case 30: 8348 pipeconf |= PIPECONF_10BPC; 8349 break; 8350 default: 8351 /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ 8352 BUG(); 8353 } 8354 } 8355 8356 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 8357 if (INTEL_GEN(dev_priv) < 4 || 8358 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8359 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 8360 else 8361 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 8362 } else { 8363 pipeconf |= PIPECONF_PROGRESSIVE; 8364 } 8365 8366 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 8367 crtc_state->limited_color_range) 8368 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 8369 8370 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 8371 8372 I915_WRITE(PIPECONF(crtc->pipe), pipeconf); 8373 POSTING_READ(PIPECONF(crtc->pipe)); 8374 } 8375 8376 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 8377 struct intel_crtc_state *crtc_state) 8378 { 8379 struct drm_device *dev = crtc->base.dev; 8380 struct drm_i915_private *dev_priv = to_i915(dev); 8381 const struct intel_limit *limit; 8382 int refclk = 48000; 8383 8384 memset(&crtc_state->dpll_hw_state, 0, 8385 sizeof(crtc_state->dpll_hw_state)); 8386 8387 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8388 if (intel_panel_use_ssc(dev_priv)) { 8389 refclk = dev_priv->vbt.lvds_ssc_freq; 8390 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8391 } 8392 8393 limit = &intel_limits_i8xx_lvds; 8394 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 8395 limit = &intel_limits_i8xx_dvo; 8396 } else { 8397 limit = &intel_limits_i8xx_dac; 8398 } 8399 8400 if (!crtc_state->clock_set && 8401 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8402 refclk, NULL, &crtc_state->dpll)) { 8403 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8404 return -EINVAL; 8405 } 8406 8407 i8xx_compute_dpll(crtc, crtc_state, NULL); 8408 8409 return 0; 8410 } 8411 8412 static int g4x_crtc_compute_clock(struct intel_crtc *crtc, 8413 struct intel_crtc_state *crtc_state) 8414 { 8415 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8416 const struct intel_limit *limit; 8417 int refclk = 96000; 8418 8419 memset(&crtc_state->dpll_hw_state, 0, 8420 sizeof(crtc_state->dpll_hw_state)); 8421 8422 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8423 if (intel_panel_use_ssc(dev_priv)) { 8424 refclk = dev_priv->vbt.lvds_ssc_freq; 8425 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8426 } 8427 8428 if (intel_is_dual_link_lvds(dev_priv)) 8429 limit = &intel_limits_g4x_dual_channel_lvds; 8430 else 8431 limit = &intel_limits_g4x_single_channel_lvds; 8432 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 8433 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 8434 limit = &intel_limits_g4x_hdmi; 8435 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 8436 limit = &intel_limits_g4x_sdvo; 8437 } else { 8438 /* The option is for other outputs */ 8439 limit = &intel_limits_i9xx_sdvo; 8440 } 8441 8442 if (!crtc_state->clock_set && 8443 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8444 refclk, NULL, &crtc_state->dpll)) { 8445 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8446 return -EINVAL; 8447 } 8448 8449 i9xx_compute_dpll(crtc, crtc_state, NULL); 8450 8451 return 0; 8452 } 8453 8454 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 8455 struct intel_crtc_state *crtc_state) 8456 { 8457 struct drm_device *dev = crtc->base.dev; 8458 struct drm_i915_private *dev_priv = to_i915(dev); 8459 const struct intel_limit *limit; 8460 int refclk = 96000; 8461 8462 
memset(&crtc_state->dpll_hw_state, 0, 8463 sizeof(crtc_state->dpll_hw_state)); 8464 8465 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8466 if (intel_panel_use_ssc(dev_priv)) { 8467 refclk = dev_priv->vbt.lvds_ssc_freq; 8468 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8469 } 8470 8471 limit = &intel_limits_pineview_lvds; 8472 } else { 8473 limit = &intel_limits_pineview_sdvo; 8474 } 8475 8476 if (!crtc_state->clock_set && 8477 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8478 refclk, NULL, &crtc_state->dpll)) { 8479 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8480 return -EINVAL; 8481 } 8482 8483 i9xx_compute_dpll(crtc, crtc_state, NULL); 8484 8485 return 0; 8486 } 8487 8488 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 8489 struct intel_crtc_state *crtc_state) 8490 { 8491 struct drm_device *dev = crtc->base.dev; 8492 struct drm_i915_private *dev_priv = to_i915(dev); 8493 const struct intel_limit *limit; 8494 int refclk = 96000; 8495 8496 memset(&crtc_state->dpll_hw_state, 0, 8497 sizeof(crtc_state->dpll_hw_state)); 8498 8499 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8500 if (intel_panel_use_ssc(dev_priv)) { 8501 refclk = dev_priv->vbt.lvds_ssc_freq; 8502 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8503 } 8504 8505 limit = &intel_limits_i9xx_lvds; 8506 } else { 8507 limit = &intel_limits_i9xx_sdvo; 8508 } 8509 8510 if (!crtc_state->clock_set && 8511 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8512 refclk, NULL, &crtc_state->dpll)) { 8513 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8514 return -EINVAL; 8515 } 8516 8517 i9xx_compute_dpll(crtc, crtc_state, NULL); 8518 8519 return 0; 8520 } 8521 8522 static int chv_crtc_compute_clock(struct intel_crtc *crtc, 8523 struct intel_crtc_state *crtc_state) 8524 { 8525 int refclk = 100000; 8526 const struct intel_limit *limit = &intel_limits_chv; 8527 8528 memset(&crtc_state->dpll_hw_state, 0, 8529 sizeof(crtc_state->dpll_hw_state)); 8530 8531 if (!crtc_state->clock_set && 8532 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8533 refclk, NULL, &crtc_state->dpll)) { 8534 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8535 return -EINVAL; 8536 } 8537 8538 chv_compute_dpll(crtc, crtc_state); 8539 8540 return 0; 8541 } 8542 8543 static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 8544 struct intel_crtc_state *crtc_state) 8545 { 8546 int refclk = 100000; 8547 const struct intel_limit *limit = &intel_limits_vlv; 8548 8549 memset(&crtc_state->dpll_hw_state, 0, 8550 sizeof(crtc_state->dpll_hw_state)); 8551 8552 if (!crtc_state->clock_set && 8553 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8554 refclk, NULL, &crtc_state->dpll)) { 8555 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8556 return -EINVAL; 8557 } 8558 8559 vlv_compute_dpll(crtc, crtc_state); 8560 8561 return 0; 8562 } 8563 8564 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 8565 { 8566 if (IS_I830(dev_priv)) 8567 return false; 8568 8569 return INTEL_GEN(dev_priv) >= 4 || 8570 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 8571 } 8572 8573 static void i9xx_get_pfit_config(struct intel_crtc *crtc, 8574 struct intel_crtc_state *pipe_config) 8575 { 8576 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8577 u32 tmp; 8578 8579 if (!i9xx_has_pfit(dev_priv)) 8580 return; 8581 8582 tmp = I915_READ(PFIT_CONTROL); 8583 if (!(tmp & PFIT_ENABLE)) 8584 return; 8585 8586 /* Check whether the pfit is 
attached to our pipe. */ 8587 if (INTEL_GEN(dev_priv) < 4) { 8588 if (crtc->pipe != PIPE_B) 8589 return; 8590 } else { 8591 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 8592 return; 8593 } 8594 8595 pipe_config->gmch_pfit.control = tmp; 8596 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 8597 } 8598 8599 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 8600 struct intel_crtc_state *pipe_config) 8601 { 8602 struct drm_device *dev = crtc->base.dev; 8603 struct drm_i915_private *dev_priv = to_i915(dev); 8604 enum pipe pipe = crtc->pipe; 8605 struct dpll clock; 8606 u32 mdiv; 8607 int refclk = 100000; 8608 8609 /* In case of DSI, DPLL will not be used */ 8610 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8611 return; 8612 8613 vlv_dpio_get(dev_priv); 8614 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 8615 vlv_dpio_put(dev_priv); 8616 8617 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 8618 clock.m2 = mdiv & DPIO_M2DIV_MASK; 8619 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 8620 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 8621 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 8622 8623 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 8624 } 8625 8626 static void 8627 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 8628 struct intel_initial_plane_config *plane_config) 8629 { 8630 struct drm_device *dev = crtc->base.dev; 8631 struct drm_i915_private *dev_priv = to_i915(dev); 8632 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 8633 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 8634 enum pipe pipe; 8635 u32 val, base, offset; 8636 int fourcc, pixel_format; 8637 unsigned int aligned_height; 8638 struct drm_framebuffer *fb; 8639 struct intel_framebuffer *intel_fb; 8640 8641 if (!plane->get_hw_state(plane, &pipe)) 8642 return; 8643 8644 WARN_ON(pipe != crtc->pipe); 8645 8646 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8647 if (!intel_fb) { 8648 DRM_DEBUG_KMS("failed to alloc fb\n"); 8649 return; 8650 } 8651 8652 fb = &intel_fb->base; 8653 8654 fb->dev = dev; 8655 8656 val = I915_READ(DSPCNTR(i9xx_plane)); 8657 8658 if (INTEL_GEN(dev_priv) >= 4) { 8659 if (val & DISPPLANE_TILED) { 8660 plane_config->tiling = I915_TILING_X; 8661 fb->modifier = I915_FORMAT_MOD_X_TILED; 8662 } 8663 8664 if (val & DISPPLANE_ROTATE_180) 8665 plane_config->rotation = DRM_MODE_ROTATE_180; 8666 } 8667 8668 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && 8669 val & DISPPLANE_MIRROR) 8670 plane_config->rotation |= DRM_MODE_REFLECT_X; 8671 8672 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 8673 fourcc = i9xx_format_to_fourcc(pixel_format); 8674 fb->format = drm_format_info(fourcc); 8675 8676 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 8677 offset = I915_READ(DSPOFFSET(i9xx_plane)); 8678 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 8679 } else if (INTEL_GEN(dev_priv) >= 4) { 8680 if (plane_config->tiling) 8681 offset = I915_READ(DSPTILEOFF(i9xx_plane)); 8682 else 8683 offset = I915_READ(DSPLINOFF(i9xx_plane)); 8684 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 8685 } else { 8686 base = I915_READ(DSPADDR(i9xx_plane)); 8687 } 8688 plane_config->base = base; 8689 8690 val = I915_READ(PIPESRC(pipe)); 8691 fb->width = ((val >> 16) & 0xfff) + 1; 8692 fb->height = ((val >> 0) & 0xfff) + 1; 8693 8694 val = I915_READ(DSPSTRIDE(i9xx_plane)); 8695 fb->pitches[0] = val & 0xffffffc0; 8696 8697 aligned_height = intel_fb_align_height(fb, 0, fb->height); 8698 8699 plane_config->size = fb->pitches[0] * aligned_height; 8700 
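	/*
	 * Worked example (hypothetical BIOS fb): a linear 1920x1080
	 * XRGB8888 plane gives pitch 1920 * 4 = 7680 bytes and
	 * aligned_height 1080, so the estimated size is
	 * 7680 * 1080 = 8294400 bytes (~7.9 MiB).
	 */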
8701 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 8702 crtc->base.name, plane->base.name, fb->width, fb->height, 8703 fb->format->cpp[0] * 8, base, fb->pitches[0], 8704 plane_config->size); 8705 8706 plane_config->fb = intel_fb; 8707 } 8708 8709 static void chv_crtc_clock_get(struct intel_crtc *crtc, 8710 struct intel_crtc_state *pipe_config) 8711 { 8712 struct drm_device *dev = crtc->base.dev; 8713 struct drm_i915_private *dev_priv = to_i915(dev); 8714 enum pipe pipe = crtc->pipe; 8715 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8716 struct dpll clock; 8717 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 8718 int refclk = 100000; 8719 8720 /* In case of DSI, DPLL will not be used */ 8721 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8722 return; 8723 8724 vlv_dpio_get(dev_priv); 8725 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 8726 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 8727 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 8728 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 8729 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8730 vlv_dpio_put(dev_priv); 8731 8732 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 8733 clock.m2 = (pll_dw0 & 0xff) << 22; 8734 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 8735 clock.m2 |= pll_dw2 & 0x3fffff; 8736 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 8737 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 8738 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 8739 8740 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 8741 } 8742 8743 static enum intel_output_format 8744 bdw_get_pipemisc_output_format(struct intel_crtc *crtc) 8745 { 8746 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8747 u32 tmp; 8748 8749 tmp = I915_READ(PIPEMISC(crtc->pipe)); 8750 8751 if (tmp & PIPEMISC_YUV420_ENABLE) { 8752 /* We support 4:2:0 in full blend mode only */ 8753 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); 8754 8755 return INTEL_OUTPUT_FORMAT_YCBCR420; 8756 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { 8757 return INTEL_OUTPUT_FORMAT_YCBCR444; 8758 } else { 8759 return INTEL_OUTPUT_FORMAT_RGB; 8760 } 8761 } 8762 8763 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) 8764 { 8765 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 8766 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 8767 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8768 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 8769 u32 tmp; 8770 8771 tmp = I915_READ(DSPCNTR(i9xx_plane)); 8772 8773 if (tmp & DISPPLANE_GAMMA_ENABLE) 8774 crtc_state->gamma_enable = true; 8775 8776 if (!HAS_GMCH(dev_priv) && 8777 tmp & DISPPLANE_PIPE_CSC_ENABLE) 8778 crtc_state->csc_enable = true; 8779 } 8780 8781 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 8782 struct intel_crtc_state *pipe_config) 8783 { 8784 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8785 enum intel_display_power_domain power_domain; 8786 intel_wakeref_t wakeref; 8787 u32 tmp; 8788 bool ret; 8789 8790 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 8791 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 8792 if (!wakeref) 8793 return false; 8794 8795 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 8796 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8797 pipe_config->shared_dpll = NULL; 8798 8799 ret = false; 8800 8801 tmp = 
I915_READ(PIPECONF(crtc->pipe)); 8802 if (!(tmp & PIPECONF_ENABLE)) 8803 goto out; 8804 8805 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8806 IS_CHERRYVIEW(dev_priv)) { 8807 switch (tmp & PIPECONF_BPC_MASK) { 8808 case PIPECONF_6BPC: 8809 pipe_config->pipe_bpp = 18; 8810 break; 8811 case PIPECONF_8BPC: 8812 pipe_config->pipe_bpp = 24; 8813 break; 8814 case PIPECONF_10BPC: 8815 pipe_config->pipe_bpp = 30; 8816 break; 8817 default: 8818 break; 8819 } 8820 } 8821 8822 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 8823 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 8824 pipe_config->limited_color_range = true; 8825 8826 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> 8827 PIPECONF_GAMMA_MODE_SHIFT; 8828 8829 if (IS_CHERRYVIEW(dev_priv)) 8830 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe)); 8831 8832 i9xx_get_pipe_color_config(pipe_config); 8833 intel_color_get_config(pipe_config); 8834 8835 if (INTEL_GEN(dev_priv) < 4) 8836 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 8837 8838 intel_get_pipe_timings(crtc, pipe_config); 8839 intel_get_pipe_src_size(crtc, pipe_config); 8840 8841 i9xx_get_pfit_config(crtc, pipe_config); 8842 8843 if (INTEL_GEN(dev_priv) >= 4) { 8844 /* No way to read it out on pipes B and C */ 8845 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 8846 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 8847 else 8848 tmp = I915_READ(DPLL_MD(crtc->pipe)); 8849 pipe_config->pixel_multiplier = 8850 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 8851 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 8852 pipe_config->dpll_hw_state.dpll_md = tmp; 8853 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 8854 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 8855 tmp = I915_READ(DPLL(crtc->pipe)); 8856 pipe_config->pixel_multiplier = 8857 ((tmp & SDVO_MULTIPLIER_MASK) 8858 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 8859 } else { 8860 /* Note that on i915G/GM the pixel multiplier is in the sdvo 8861 * port and will be fixed up in the encoder->get_config 8862 * function. */ 8863 pipe_config->pixel_multiplier = 1; 8864 } 8865 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 8866 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 8867 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 8868 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 8869 } else { 8870 /* Mask out read-only status bits. */ 8871 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 8872 DPLL_PORTC_READY_MASK | 8873 DPLL_PORTB_READY_MASK); 8874 } 8875 8876 if (IS_CHERRYVIEW(dev_priv)) 8877 chv_crtc_clock_get(crtc, pipe_config); 8878 else if (IS_VALLEYVIEW(dev_priv)) 8879 vlv_crtc_clock_get(crtc, pipe_config); 8880 else 8881 i9xx_crtc_clock_get(crtc, pipe_config); 8882 8883 /* 8884 * Normally the dotclock is filled in by the encoder .get_config() 8885 * but in case the pipe is enabled w/o any ports we need a sane 8886 * default. 
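 * E.g. a pipe left running at port_clock 270000 kHz with
 * pixel_multiplier 1 gets a 270 MHz dotclock; with an SDVO
 * pixel_multiplier of 2 the division below recovers the real
 * pixel clock from the doubled port clock.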
8887 */ 8888 pipe_config->base.adjusted_mode.crtc_clock = 8889 pipe_config->port_clock / pipe_config->pixel_multiplier; 8890 8891 ret = true; 8892 8893 out: 8894 intel_display_power_put(dev_priv, power_domain, wakeref); 8895 8896 return ret; 8897 } 8898 8899 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) 8900 { 8901 struct intel_encoder *encoder; 8902 int i; 8903 u32 val, final; 8904 bool has_lvds = false; 8905 bool has_cpu_edp = false; 8906 bool has_panel = false; 8907 bool has_ck505 = false; 8908 bool can_ssc = false; 8909 bool using_ssc_source = false; 8910 8911 /* We need to take the global config into account */ 8912 for_each_intel_encoder(&dev_priv->drm, encoder) { 8913 switch (encoder->type) { 8914 case INTEL_OUTPUT_LVDS: 8915 has_panel = true; 8916 has_lvds = true; 8917 break; 8918 case INTEL_OUTPUT_EDP: 8919 has_panel = true; 8920 if (encoder->port == PORT_A) 8921 has_cpu_edp = true; 8922 break; 8923 default: 8924 break; 8925 } 8926 } 8927 8928 if (HAS_PCH_IBX(dev_priv)) { 8929 has_ck505 = dev_priv->vbt.display_clock_mode; 8930 can_ssc = has_ck505; 8931 } else { 8932 has_ck505 = false; 8933 can_ssc = true; 8934 } 8935 8936 /* Check if any DPLLs are using the SSC source */ 8937 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 8938 u32 temp = I915_READ(PCH_DPLL(i)); 8939 8940 if (!(temp & DPLL_VCO_ENABLE)) 8941 continue; 8942 8943 if ((temp & PLL_REF_INPUT_MASK) == 8944 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 8945 using_ssc_source = true; 8946 break; 8947 } 8948 } 8949 8950 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 8951 has_panel, has_lvds, has_ck505, using_ssc_source); 8952 8953 /* Ironlake: try to set up the display ref clock before DPLL 8954 * enabling. This is only under the driver's control after 8955 * the PCH B stepping; earlier chipset steppings should 8956 * ignore this setting. 8957 */ 8958 val = I915_READ(PCH_DREF_CONTROL); 8959 8960 /* As we must carefully and slowly disable/enable each source in turn, 8961 * compute the final state we want first and check if we need to 8962 * make any changes at all.
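 * ('final' below is that precomputed end state; val is then walked
 * towards it with PCH_DREF_CONTROL writes, each followed by a 200 us
 * settling delay)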
8963 */ 8964 final = val; 8965 final &= ~DREF_NONSPREAD_SOURCE_MASK; 8966 if (has_ck505) 8967 final |= DREF_NONSPREAD_CK505_ENABLE; 8968 else 8969 final |= DREF_NONSPREAD_SOURCE_ENABLE; 8970 8971 final &= ~DREF_SSC_SOURCE_MASK; 8972 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8973 final &= ~DREF_SSC1_ENABLE; 8974 8975 if (has_panel) { 8976 final |= DREF_SSC_SOURCE_ENABLE; 8977 8978 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8979 final |= DREF_SSC1_ENABLE; 8980 8981 if (has_cpu_edp) { 8982 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8983 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 8984 else 8985 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8986 } else 8987 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8988 } else if (using_ssc_source) { 8989 final |= DREF_SSC_SOURCE_ENABLE; 8990 final |= DREF_SSC1_ENABLE; 8991 } 8992 8993 if (final == val) 8994 return; 8995 8996 /* Always enable nonspread source */ 8997 val &= ~DREF_NONSPREAD_SOURCE_MASK; 8998 8999 if (has_ck505) 9000 val |= DREF_NONSPREAD_CK505_ENABLE; 9001 else 9002 val |= DREF_NONSPREAD_SOURCE_ENABLE; 9003 9004 if (has_panel) { 9005 val &= ~DREF_SSC_SOURCE_MASK; 9006 val |= DREF_SSC_SOURCE_ENABLE; 9007 9008 /* SSC must be turned on before enabling the CPU output */ 9009 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9010 DRM_DEBUG_KMS("Using SSC on panel\n"); 9011 val |= DREF_SSC1_ENABLE; 9012 } else 9013 val &= ~DREF_SSC1_ENABLE; 9014 9015 /* Get SSC going before enabling the outputs */ 9016 I915_WRITE(PCH_DREF_CONTROL, val); 9017 POSTING_READ(PCH_DREF_CONTROL); 9018 udelay(200); 9019 9020 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9021 9022 /* Enable CPU source on CPU attached eDP */ 9023 if (has_cpu_edp) { 9024 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9025 DRM_DEBUG_KMS("Using SSC on eDP\n"); 9026 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9027 } else 9028 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9029 } else 9030 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9031 9032 I915_WRITE(PCH_DREF_CONTROL, val); 9033 POSTING_READ(PCH_DREF_CONTROL); 9034 udelay(200); 9035 } else { 9036 DRM_DEBUG_KMS("Disabling CPU source output\n"); 9037 9038 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9039 9040 /* Turn off CPU output */ 9041 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9042 9043 I915_WRITE(PCH_DREF_CONTROL, val); 9044 POSTING_READ(PCH_DREF_CONTROL); 9045 udelay(200); 9046 9047 if (!using_ssc_source) { 9048 DRM_DEBUG_KMS("Disabling SSC source\n"); 9049 9050 /* Turn off the SSC source */ 9051 val &= ~DREF_SSC_SOURCE_MASK; 9052 val |= DREF_SSC_SOURCE_DISABLE; 9053 9054 /* Turn off SSC1 */ 9055 val &= ~DREF_SSC1_ENABLE; 9056 9057 I915_WRITE(PCH_DREF_CONTROL, val); 9058 POSTING_READ(PCH_DREF_CONTROL); 9059 udelay(200); 9060 } 9061 } 9062 9063 BUG_ON(val != final); 9064 } 9065 9066 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 9067 { 9068 u32 tmp; 9069 9070 tmp = I915_READ(SOUTH_CHICKEN2); 9071 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 9072 I915_WRITE(SOUTH_CHICKEN2, tmp); 9073 9074 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & 9075 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 9076 DRM_ERROR("FDI mPHY reset assert timeout\n"); 9077 9078 tmp = I915_READ(SOUTH_CHICKEN2); 9079 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 9080 I915_WRITE(SOUTH_CHICKEN2, tmp); 9081 9082 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & 9083 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 9084 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 9085 } 9086 9087 /* WaMPhyProgramming:hsw */ 9088 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 9089 { 9090 u32 tmp; 9091 9092 tmp = intel_sbi_read(dev_priv, 
0x8008, SBI_MPHY); 9093 tmp &= ~(0xFF << 24); 9094 tmp |= (0x12 << 24); 9095 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 9096 9097 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 9098 tmp |= (1 << 11); 9099 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 9100 9101 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 9102 tmp |= (1 << 11); 9103 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 9104 9105 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 9106 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9107 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 9108 9109 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 9110 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9111 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 9112 9113 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 9114 tmp &= ~(7 << 13); 9115 tmp |= (5 << 13); 9116 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 9117 9118 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 9119 tmp &= ~(7 << 13); 9120 tmp |= (5 << 13); 9121 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 9122 9123 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 9124 tmp &= ~0xFF; 9125 tmp |= 0x1C; 9126 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 9127 9128 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 9129 tmp &= ~0xFF; 9130 tmp |= 0x1C; 9131 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 9132 9133 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 9134 tmp &= ~(0xFF << 16); 9135 tmp |= (0x1C << 16); 9136 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 9137 9138 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 9139 tmp &= ~(0xFF << 16); 9140 tmp |= (0x1C << 16); 9141 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 9142 9143 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 9144 tmp |= (1 << 27); 9145 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 9146 9147 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 9148 tmp |= (1 << 27); 9149 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 9150 9151 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 9152 tmp &= ~(0xF << 28); 9153 tmp |= (4 << 28); 9154 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 9155 9156 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 9157 tmp &= ~(0xF << 28); 9158 tmp |= (4 << 28); 9159 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 9160 } 9161 9162 /* Implements 3 different sequences from BSpec chapter "Display iCLK 9163 * Programming" based on the parameters passed: 9164 * - Sequence to enable CLKOUT_DP 9165 * - Sequence to enable CLKOUT_DP without spread 9166 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 9167 */ 9168 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 9169 bool with_spread, bool with_fdi) 9170 { 9171 u32 reg, tmp; 9172 9173 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 9174 with_spread = true; 9175 if (WARN(HAS_PCH_LPT_LP(dev_priv) && 9176 with_fdi, "LP PCH doesn't have FDI\n")) 9177 with_fdi = false; 9178 9179 mutex_lock(&dev_priv->sb_lock); 9180 9181 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9182 tmp &= ~SBI_SSCCTL_DISABLE; 9183 tmp |= SBI_SSCCTL_PATHALT; 9184 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9185 9186 udelay(24); 9187 9188 if (with_spread) { 9189 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9190 tmp &= ~SBI_SSCCTL_PATHALT; 9191 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9192 9193 if (with_fdi) { 9194 lpt_reset_fdi_mphy(dev_priv); 9195 lpt_program_fdi_mphy(dev_priv); 9196 } 9197 } 9198 9199 reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; 9200 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9201 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9202 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9203 9204 mutex_unlock(&dev_priv->sb_lock); 9205 } 9206 9207 /* Sequence to disable CLKOUT_DP */ 9208 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 9209 { 9210 u32 reg, tmp; 9211 9212 mutex_lock(&dev_priv->sb_lock); 9213 9214 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 9215 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9216 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9217 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9218 9219 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9220 if (!(tmp & SBI_SSCCTL_DISABLE)) { 9221 if (!(tmp & SBI_SSCCTL_PATHALT)) { 9222 tmp |= SBI_SSCCTL_PATHALT; 9223 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9224 udelay(32); 9225 } 9226 tmp |= SBI_SSCCTL_DISABLE; 9227 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9228 } 9229 9230 mutex_unlock(&dev_priv->sb_lock); 9231 } 9232 9233 #define BEND_IDX(steps) ((50 + (steps)) / 5) 9234 9235 static const u16 sscdivintphase[] = { 9236 [BEND_IDX( 50)] = 0x3B23, 9237 [BEND_IDX( 45)] = 0x3B23, 9238 [BEND_IDX( 40)] = 0x3C23, 9239 [BEND_IDX( 35)] = 0x3C23, 9240 [BEND_IDX( 30)] = 0x3D23, 9241 [BEND_IDX( 25)] = 0x3D23, 9242 [BEND_IDX( 20)] = 0x3E23, 9243 [BEND_IDX( 15)] = 0x3E23, 9244 [BEND_IDX( 10)] = 0x3F23, 9245 [BEND_IDX( 5)] = 0x3F23, 9246 [BEND_IDX( 0)] = 0x0025, 9247 [BEND_IDX( -5)] = 0x0025, 9248 [BEND_IDX(-10)] = 0x0125, 9249 [BEND_IDX(-15)] = 0x0125, 9250 [BEND_IDX(-20)] = 0x0225, 9251 [BEND_IDX(-25)] = 0x0225, 9252 [BEND_IDX(-30)] = 0x0325, 9253 [BEND_IDX(-35)] = 0x0325, 9254 [BEND_IDX(-40)] = 0x0425, 9255 [BEND_IDX(-45)] = 0x0425, 9256 [BEND_IDX(-50)] = 0x0525, 9257 }; 9258 9259 /* 9260 * Bend CLKOUT_DP 9261 * steps -50 to 50 inclusive, in steps of 5 9262 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 9263 * change in clock period = -(steps / 10) * 5.787 ps 9264 */ 9265 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 9266 { 9267 u32 tmp; 9268 int idx = BEND_IDX(steps); 9269 9270 if (WARN_ON(steps % 5 != 0)) 9271 return; 9272 9273 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) 9274 return; 9275 9276 mutex_lock(&dev_priv->sb_lock); 9277 9278 if (steps % 10 != 0) 9279 tmp = 0xAAAAAAAB; 9280 else 9281 tmp = 0x00000000; 9282 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 9283 9284 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 9285 tmp &= 0xffff0000; 9286 tmp |= sscdivintphase[idx]; 9287 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 9288 9289 mutex_unlock(&dev_priv->sb_lock); 9290 } 9291 9292 #undef BEND_IDX 9293 9294 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) 9295 { 9296 u32 fuse_strap = I915_READ(FUSE_STRAP); 9297 u32 ctl = I915_READ(SPLL_CTL); 9298 9299 if ((ctl & SPLL_PLL_ENABLE) == 0) 9300 return false; 9301 9302 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && 9303 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9304 return true; 9305 9306 if (IS_BROADWELL(dev_priv) && 9307 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) 9308 return true; 9309 9310 return false; 9311 } 9312 9313 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, 9314 enum intel_dpll_id id) 9315 { 9316 u32 fuse_strap = I915_READ(FUSE_STRAP); 9317 u32 ctl = I915_READ(WRPLL_CTL(id)); 9318 9319 if ((ctl & WRPLL_PLL_ENABLE) == 0) 9320 return false; 9321 9322 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) 9323 
return true; 9324 9325 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && 9326 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && 9327 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9328 return true; 9329 9330 return false; 9331 } 9332 9333 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 9334 { 9335 struct intel_encoder *encoder; 9336 bool pch_ssc_in_use = false; 9337 bool has_fdi = false; 9338 9339 for_each_intel_encoder(&dev_priv->drm, encoder) { 9340 switch (encoder->type) { 9341 case INTEL_OUTPUT_ANALOG: 9342 has_fdi = true; 9343 break; 9344 default: 9345 break; 9346 } 9347 } 9348 9349 /* 9350 * The BIOS may have decided to use the PCH SSC 9351 * reference so we must not disable it until the 9352 * relevant PLLs have stopped relying on it. We'll 9353 * just leave the PCH SSC reference enabled in case 9354 * any active PLL is using it. It will get disabled 9355 * after runtime suspend if we don't have FDI. 9356 * 9357 * TODO: Move the whole reference clock handling 9358 * to the modeset sequence proper so that we can 9359 * actually enable/disable/reconfigure these things 9360 * safely. To do that we need to introduce a real 9361 * clock hierarchy. That would also allow us to do 9362 * clock bending finally. 9363 */ 9364 if (spll_uses_pch_ssc(dev_priv)) { 9365 DRM_DEBUG_KMS("SPLL using PCH SSC\n"); 9366 pch_ssc_in_use = true; 9367 } 9368 9369 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { 9370 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n"); 9371 pch_ssc_in_use = true; 9372 } 9373 9374 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { 9375 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n"); 9376 pch_ssc_in_use = true; 9377 } 9378 9379 if (pch_ssc_in_use) 9380 return; 9381 9382 if (has_fdi) { 9383 lpt_bend_clkout_dp(dev_priv, 0); 9384 lpt_enable_clkout_dp(dev_priv, true, true); 9385 } else { 9386 lpt_disable_clkout_dp(dev_priv); 9387 } 9388 } 9389 9390 /* 9391 * Initialize reference clocks when the driver loads 9392 */ 9393 void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 9394 { 9395 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 9396 ironlake_init_pch_refclk(dev_priv); 9397 else if (HAS_PCH_LPT(dev_priv)) 9398 lpt_init_pch_refclk(dev_priv); 9399 } 9400 9401 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) 9402 { 9403 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9404 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9405 enum pipe pipe = crtc->pipe; 9406 u32 val; 9407 9408 val = 0; 9409 9410 switch (crtc_state->pipe_bpp) { 9411 case 18: 9412 val |= PIPECONF_6BPC; 9413 break; 9414 case 24: 9415 val |= PIPECONF_8BPC; 9416 break; 9417 case 30: 9418 val |= PIPECONF_10BPC; 9419 break; 9420 case 36: 9421 val |= PIPECONF_12BPC; 9422 break; 9423 default: 9424 /* Case prevented by intel_choose_pipe_bpp_dither. */ 9425 BUG(); 9426 } 9427 9428 if (crtc_state->dither) 9429 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9430 9431 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9432 val |= PIPECONF_INTERLACED_ILK; 9433 else 9434 val |= PIPECONF_PROGRESSIVE; 9435 9436 /* 9437 * This would end up with an odd purple hue over 9438 * the entire display. Make sure we don't do it. 
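 * (A plausible explanation, not spelled out here: COLOR_RANGE_SELECT
 * applies RGB limited range compression, which makes no sense on top
 * of a YCbCr output format.)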
9439 */ 9440 WARN_ON(crtc_state->limited_color_range && 9441 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 9442 9443 if (crtc_state->limited_color_range) 9444 val |= PIPECONF_COLOR_RANGE_SELECT; 9445 9446 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 9447 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; 9448 9449 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 9450 9451 I915_WRITE(PIPECONF(pipe), val); 9452 POSTING_READ(PIPECONF(pipe)); 9453 } 9454 9455 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) 9456 { 9457 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9458 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9459 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 9460 u32 val = 0; 9461 9462 if (IS_HASWELL(dev_priv) && crtc_state->dither) 9463 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9464 9465 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9466 val |= PIPECONF_INTERLACED_ILK; 9467 else 9468 val |= PIPECONF_PROGRESSIVE; 9469 9470 if (IS_HASWELL(dev_priv) && 9471 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 9472 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; 9473 9474 I915_WRITE(PIPECONF(cpu_transcoder), val); 9475 POSTING_READ(PIPECONF(cpu_transcoder)); 9476 } 9477 9478 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) 9479 { 9480 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9481 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9482 u32 val = 0; 9483 9484 switch (crtc_state->pipe_bpp) { 9485 case 18: 9486 val |= PIPEMISC_DITHER_6_BPC; 9487 break; 9488 case 24: 9489 val |= PIPEMISC_DITHER_8_BPC; 9490 break; 9491 case 30: 9492 val |= PIPEMISC_DITHER_10_BPC; 9493 break; 9494 case 36: 9495 val |= PIPEMISC_DITHER_12_BPC; 9496 break; 9497 default: 9498 MISSING_CASE(crtc_state->pipe_bpp); 9499 break; 9500 } 9501 9502 if (crtc_state->dither) 9503 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 9504 9505 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 9506 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 9507 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; 9508 9509 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 9510 val |= PIPEMISC_YUV420_ENABLE | 9511 PIPEMISC_YUV420_MODE_FULL_BLEND; 9512 9513 if (INTEL_GEN(dev_priv) >= 11 && 9514 (crtc_state->active_planes & ~(icl_hdr_plane_mask() | 9515 BIT(PLANE_CURSOR))) == 0) 9516 val |= PIPEMISC_HDR_MODE_PRECISION; 9517 9518 I915_WRITE(PIPEMISC(crtc->pipe), val); 9519 } 9520 9521 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) 9522 { 9523 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9524 u32 tmp; 9525 9526 tmp = I915_READ(PIPEMISC(crtc->pipe)); 9527 9528 switch (tmp & PIPEMISC_DITHER_BPC_MASK) { 9529 case PIPEMISC_DITHER_6_BPC: 9530 return 18; 9531 case PIPEMISC_DITHER_8_BPC: 9532 return 24; 9533 case PIPEMISC_DITHER_10_BPC: 9534 return 30; 9535 case PIPEMISC_DITHER_12_BPC: 9536 return 36; 9537 default: 9538 MISSING_CASE(tmp); 9539 return 0; 9540 } 9541 } 9542 9543 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 9544 { 9545 /* 9546 * Account for spread spectrum to avoid 9547 * oversubscribing the link. Max center spread 9548 * is 2.5%; use 5% for safety's sake. 
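 * As a worked example (illustrative numbers only): a 148500 kHz
 * target clock at 24 bpp gives bps = 148500 * 24 * 21 / 20 = 3742200,
 * and a 270000 kHz link then needs
 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.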
9549 */ 9550 u32 bps = target_clock * bpp * 21 / 20; 9551 return DIV_ROUND_UP(bps, link_bw * 8); 9552 } 9553 9554 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) 9555 { 9556 return i9xx_dpll_compute_m(dpll) < factor * dpll->n; 9557 } 9558 9559 static void ironlake_compute_dpll(struct intel_crtc *crtc, 9560 struct intel_crtc_state *crtc_state, 9561 struct dpll *reduced_clock) 9562 { 9563 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9564 u32 dpll, fp, fp2; 9565 int factor; 9566 9567 /* Enable autotuning of the PLL clock (if permissible) */ 9568 factor = 21; 9569 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9570 if ((intel_panel_use_ssc(dev_priv) && 9571 dev_priv->vbt.lvds_ssc_freq == 100000) || 9572 (HAS_PCH_IBX(dev_priv) && 9573 intel_is_dual_link_lvds(dev_priv))) 9574 factor = 25; 9575 } else if (crtc_state->sdvo_tv_clock) { 9576 factor = 20; 9577 } 9578 9579 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 9580 9581 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor)) 9582 fp |= FP_CB_TUNE; 9583 9584 if (reduced_clock) { 9585 fp2 = i9xx_dpll_compute_fp(reduced_clock); 9586 9587 if (reduced_clock->m < factor * reduced_clock->n) 9588 fp2 |= FP_CB_TUNE; 9589 } else { 9590 fp2 = fp; 9591 } 9592 9593 dpll = 0; 9594 9595 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 9596 dpll |= DPLLB_MODE_LVDS; 9597 else 9598 dpll |= DPLLB_MODE_DAC_SERIAL; 9599 9600 dpll |= (crtc_state->pixel_multiplier - 1) 9601 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 9602 9603 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 9604 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 9605 dpll |= DPLL_SDVO_HIGH_SPEED; 9606 9607 if (intel_crtc_has_dp_encoder(crtc_state)) 9608 dpll |= DPLL_SDVO_HIGH_SPEED; 9609 9610 /* 9611 * The high speed IO clock is only really required for 9612 * SDVO/HDMI/DP, but we also enable it for CRT to make it 9613 * possible to share the DPLL between CRT and HDMI. Enabling 9614 * the clock needlessly does no real harm, except use up a 9615 * bit of power potentially. 9616 * 9617 * We'll limit this to IVB with 3 pipes, since it has only two 9618 * DPLLs and so DPLL sharing is the only way to get three pipes 9619 * driving PCH ports at the same time. On SNB we could do this, 9620 * and potentially avoid enabling the second DPLL, but it's not 9621 * clear if it's a win or loss power wise. No point in doing 9622 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9623 */ 9624 if (INTEL_NUM_PIPES(dev_priv) == 3 && 9625 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 9626 dpll |= DPLL_SDVO_HIGH_SPEED; 9627 9628 /* compute bitmask from p1 value */ 9629 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 9630 /* also FPA1 */ 9631 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 9632 9633 switch (crtc_state->dpll.p2) { 9634 case 5: 9635 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 9636 break; 9637 case 7: 9638 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 9639 break; 9640 case 10: 9641 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 9642 break; 9643 case 14: 9644 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 9645 break; 9646 } 9647 9648 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 9649 intel_panel_use_ssc(dev_priv)) 9650 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 9651 else 9652 dpll |= PLL_REF_INPUT_DREFCLK; 9653 9654 dpll |= DPLL_VCO_ENABLE; 9655 9656 crtc_state->dpll_hw_state.dpll = dpll; 9657 crtc_state->dpll_hw_state.fp0 = fp; 9658 crtc_state->dpll_hw_state.fp1 = fp2; 9659 } 9660 9661 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, 9662 struct intel_crtc_state *crtc_state) 9663 { 9664 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9665 struct intel_atomic_state *state = 9666 to_intel_atomic_state(crtc_state->base.state); 9667 const struct intel_limit *limit; 9668 int refclk = 120000; 9669 9670 memset(&crtc_state->dpll_hw_state, 0, 9671 sizeof(crtc_state->dpll_hw_state)); 9672 9673 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 9674 if (!crtc_state->has_pch_encoder) 9675 return 0; 9676 9677 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9678 if (intel_panel_use_ssc(dev_priv)) { 9679 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 9680 dev_priv->vbt.lvds_ssc_freq); 9681 refclk = dev_priv->vbt.lvds_ssc_freq; 9682 } 9683 9684 if (intel_is_dual_link_lvds(dev_priv)) { 9685 if (refclk == 100000) 9686 limit = &intel_limits_ironlake_dual_lvds_100m; 9687 else 9688 limit = &intel_limits_ironlake_dual_lvds; 9689 } else { 9690 if (refclk == 100000) 9691 limit = &intel_limits_ironlake_single_lvds_100m; 9692 else 9693 limit = &intel_limits_ironlake_single_lvds; 9694 } 9695 } else { 9696 limit = &intel_limits_ironlake_dac; 9697 } 9698 9699 if (!crtc_state->clock_set && 9700 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9701 refclk, NULL, &crtc_state->dpll)) { 9702 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 9703 return -EINVAL; 9704 } 9705 9706 ironlake_compute_dpll(crtc, crtc_state, NULL); 9707 9708 if (!intel_reserve_shared_dplls(state, crtc, NULL)) { 9709 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", 9710 pipe_name(crtc->pipe)); 9711 return -EINVAL; 9712 } 9713 9714 return 0; 9715 } 9716 9717 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 9718 struct intel_link_m_n *m_n) 9719 { 9720 struct drm_device *dev = crtc->base.dev; 9721 struct drm_i915_private *dev_priv = to_i915(dev); 9722 enum pipe pipe = crtc->pipe; 9723 9724 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 9725 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 9726 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 9727 & ~TU_SIZE_MASK; 9728 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 9729 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 9730 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9731 } 9732 9733 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 9734 enum transcoder transcoder, 9735 struct intel_link_m_n *m_n, 9736 struct 
intel_link_m_n *m2_n2) 9737 { 9738 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9739 enum pipe pipe = crtc->pipe; 9740 9741 if (INTEL_GEN(dev_priv) >= 5) { 9742 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); 9743 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); 9744 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 9745 & ~TU_SIZE_MASK; 9746 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 9747 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 9748 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9749 9750 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { 9751 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 9752 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 9753 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 9754 & ~TU_SIZE_MASK; 9755 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 9756 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 9757 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9758 } 9759 } else { 9760 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 9761 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 9762 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 9763 & ~TU_SIZE_MASK; 9764 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 9765 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 9766 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9767 } 9768 } 9769 9770 void intel_dp_get_m_n(struct intel_crtc *crtc, 9771 struct intel_crtc_state *pipe_config) 9772 { 9773 if (pipe_config->has_pch_encoder) 9774 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 9775 else 9776 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9777 &pipe_config->dp_m_n, 9778 &pipe_config->dp_m2_n2); 9779 } 9780 9781 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 9782 struct intel_crtc_state *pipe_config) 9783 { 9784 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9785 &pipe_config->fdi_m_n, NULL); 9786 } 9787 9788 static void skylake_get_pfit_config(struct intel_crtc *crtc, 9789 struct intel_crtc_state *pipe_config) 9790 { 9791 struct drm_device *dev = crtc->base.dev; 9792 struct drm_i915_private *dev_priv = to_i915(dev); 9793 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 9794 u32 ps_ctrl = 0; 9795 int id = -1; 9796 int i; 9797 9798 /* find scaler attached to this pipe */ 9799 for (i = 0; i < crtc->num_scalers; i++) { 9800 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 9801 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 9802 id = i; 9803 pipe_config->pch_pfit.enabled = true; 9804 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); 9805 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); 9806 scaler_state->scalers[i].in_use = true; 9807 break; 9808 } 9809 } 9810 9811 scaler_state->scaler_id = id; 9812 if (id >= 0) { 9813 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 9814 } else { 9815 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 9816 } 9817 } 9818 9819 static void 9820 skylake_get_initial_plane_config(struct intel_crtc *crtc, 9821 struct intel_initial_plane_config *plane_config) 9822 { 9823 struct drm_device *dev = crtc->base.dev; 9824 struct drm_i915_private *dev_priv = to_i915(dev); 9825 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 9826 enum plane_id plane_id = plane->id; 9827 enum pipe pipe; 9828 u32 val, base, offset, stride_mult, tiling, alpha; 9829 int fourcc, pixel_format; 9830 unsigned int aligned_height; 9831 struct drm_framebuffer *fb; 9832 struct intel_framebuffer *intel_fb; 9833 9834 if 
(!plane->get_hw_state(plane, &pipe)) 9835 return; 9836 9837 WARN_ON(pipe != crtc->pipe); 9838 9839 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9840 if (!intel_fb) { 9841 DRM_DEBUG_KMS("failed to alloc fb\n"); 9842 return; 9843 } 9844 9845 fb = &intel_fb->base; 9846 9847 fb->dev = dev; 9848 9849 val = I915_READ(PLANE_CTL(pipe, plane_id)); 9850 9851 if (INTEL_GEN(dev_priv) >= 11) 9852 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK; 9853 else 9854 pixel_format = val & PLANE_CTL_FORMAT_MASK; 9855 9856 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { 9857 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id)); 9858 alpha &= PLANE_COLOR_ALPHA_MASK; 9859 } else { 9860 alpha = val & PLANE_CTL_ALPHA_MASK; 9861 } 9862 9863 fourcc = skl_format_to_fourcc(pixel_format, 9864 val & PLANE_CTL_ORDER_RGBX, alpha); 9865 fb->format = drm_format_info(fourcc); 9866 9867 tiling = val & PLANE_CTL_TILED_MASK; 9868 switch (tiling) { 9869 case PLANE_CTL_TILED_LINEAR: 9870 fb->modifier = DRM_FORMAT_MOD_LINEAR; 9871 break; 9872 case PLANE_CTL_TILED_X: 9873 plane_config->tiling = I915_TILING_X; 9874 fb->modifier = I915_FORMAT_MOD_X_TILED; 9875 break; 9876 case PLANE_CTL_TILED_Y: 9877 plane_config->tiling = I915_TILING_Y; 9878 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) 9879 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS; 9880 else 9881 fb->modifier = I915_FORMAT_MOD_Y_TILED; 9882 break; 9883 case PLANE_CTL_TILED_YF: 9884 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) 9885 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; 9886 else 9887 fb->modifier = I915_FORMAT_MOD_Yf_TILED; 9888 break; 9889 default: 9890 MISSING_CASE(tiling); 9891 goto error; 9892 } 9893 9894 /* 9895 * DRM_MODE_ROTATE_* is counter-clockwise to stay compatible with Xrandr, 9896 * while i915 HW rotation is clockwise; that's why we swap them here.
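 * For example, hardware ROTATE_90 (clockwise) is reported as
 * DRM_MODE_ROTATE_270 (counter-clockwise) in the switch below, and
 * vice versa.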
9897 */ 9898 switch (val & PLANE_CTL_ROTATE_MASK) { 9899 case PLANE_CTL_ROTATE_0: 9900 plane_config->rotation = DRM_MODE_ROTATE_0; 9901 break; 9902 case PLANE_CTL_ROTATE_90: 9903 plane_config->rotation = DRM_MODE_ROTATE_270; 9904 break; 9905 case PLANE_CTL_ROTATE_180: 9906 plane_config->rotation = DRM_MODE_ROTATE_180; 9907 break; 9908 case PLANE_CTL_ROTATE_270: 9909 plane_config->rotation = DRM_MODE_ROTATE_90; 9910 break; 9911 } 9912 9913 if (INTEL_GEN(dev_priv) >= 10 && 9914 val & PLANE_CTL_FLIP_HORIZONTAL) 9915 plane_config->rotation |= DRM_MODE_REFLECT_X; 9916 9917 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; 9918 plane_config->base = base; 9919 9920 offset = I915_READ(PLANE_OFFSET(pipe, plane_id)); 9921 9922 val = I915_READ(PLANE_SIZE(pipe, plane_id)); 9923 fb->height = ((val >> 16) & 0xffff) + 1; 9924 fb->width = ((val >> 0) & 0xffff) + 1; 9925 9926 val = I915_READ(PLANE_STRIDE(pipe, plane_id)); 9927 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); 9928 fb->pitches[0] = (val & 0x3ff) * stride_mult; 9929 9930 aligned_height = intel_fb_align_height(fb, 0, fb->height); 9931 9932 plane_config->size = fb->pitches[0] * aligned_height; 9933 9934 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 9935 crtc->base.name, plane->base.name, fb->width, fb->height, 9936 fb->format->cpp[0] * 8, base, fb->pitches[0], 9937 plane_config->size); 9938 9939 plane_config->fb = intel_fb; 9940 return; 9941 9942 error: 9943 kfree(intel_fb); 9944 } 9945 9946 static void ironlake_get_pfit_config(struct intel_crtc *crtc, 9947 struct intel_crtc_state *pipe_config) 9948 { 9949 struct drm_device *dev = crtc->base.dev; 9950 struct drm_i915_private *dev_priv = to_i915(dev); 9951 u32 tmp; 9952 9953 tmp = I915_READ(PF_CTL(crtc->pipe)); 9954 9955 if (tmp & PF_ENABLE) { 9956 pipe_config->pch_pfit.enabled = true; 9957 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); 9958 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); 9959 9960 /* We currently do not free assignments of panel fitters on 9961 * ivb/hsw (since we don't use the higher upscaling modes which 9962 * differentiate them), so just WARN about this case for now.
*/ 9963 if (IS_GEN(dev_priv, 7)) { 9964 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 9965 PF_PIPE_SEL_IVB(crtc->pipe)); 9966 } 9967 } 9968 } 9969 9970 static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 9971 struct intel_crtc_state *pipe_config) 9972 { 9973 struct drm_device *dev = crtc->base.dev; 9974 struct drm_i915_private *dev_priv = to_i915(dev); 9975 enum intel_display_power_domain power_domain; 9976 intel_wakeref_t wakeref; 9977 u32 tmp; 9978 bool ret; 9979 9980 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9981 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 9982 if (!wakeref) 9983 return false; 9984 9985 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9986 pipe_config->shared_dpll = NULL; 9987 9988 ret = false; 9989 tmp = I915_READ(PIPECONF(crtc->pipe)); 9990 if (!(tmp & PIPECONF_ENABLE)) 9991 goto out; 9992 9993 switch (tmp & PIPECONF_BPC_MASK) { 9994 case PIPECONF_6BPC: 9995 pipe_config->pipe_bpp = 18; 9996 break; 9997 case PIPECONF_8BPC: 9998 pipe_config->pipe_bpp = 24; 9999 break; 10000 case PIPECONF_10BPC: 10001 pipe_config->pipe_bpp = 30; 10002 break; 10003 case PIPECONF_12BPC: 10004 pipe_config->pipe_bpp = 36; 10005 break; 10006 default: 10007 break; 10008 } 10009 10010 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 10011 pipe_config->limited_color_range = true; 10012 10013 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { 10014 case PIPECONF_OUTPUT_COLORSPACE_YUV601: 10015 case PIPECONF_OUTPUT_COLORSPACE_YUV709: 10016 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 10017 break; 10018 default: 10019 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 10020 break; 10021 } 10022 10023 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> 10024 PIPECONF_GAMMA_MODE_SHIFT; 10025 10026 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); 10027 10028 i9xx_get_pipe_color_config(pipe_config); 10029 intel_color_get_config(pipe_config); 10030 10031 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 10032 struct intel_shared_dpll *pll; 10033 enum intel_dpll_id pll_id; 10034 10035 pipe_config->has_pch_encoder = true; 10036 10037 tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); 10038 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10039 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10040 10041 ironlake_get_fdi_m_n_config(crtc, pipe_config); 10042 10043 if (HAS_PCH_IBX(dev_priv)) { 10044 /* 10045 * The pipe->pch transcoder and pch transcoder->pll 10046 * mapping is fixed. 
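 * (i.e. on IBX pipe A always uses PCH DPLL A and pipe B always uses
 * PCH DPLL B, which is why pll_id can be derived directly from the
 * pipe below.)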
10047 */ 10048 pll_id = (enum intel_dpll_id) crtc->pipe; 10049 } else { 10050 tmp = I915_READ(PCH_DPLL_SEL); 10051 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 10052 pll_id = DPLL_ID_PCH_PLL_B; 10053 else 10054 pll_id = DPLL_ID_PCH_PLL_A; 10055 } 10056 10057 pipe_config->shared_dpll = 10058 intel_get_shared_dpll_by_id(dev_priv, pll_id); 10059 pll = pipe_config->shared_dpll; 10060 10061 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, 10062 &pipe_config->dpll_hw_state)); 10063 10064 tmp = pipe_config->dpll_hw_state.dpll; 10065 pipe_config->pixel_multiplier = 10066 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 10067 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 10068 10069 ironlake_pch_clock_get(crtc, pipe_config); 10070 } else { 10071 pipe_config->pixel_multiplier = 1; 10072 } 10073 10074 intel_get_pipe_timings(crtc, pipe_config); 10075 intel_get_pipe_src_size(crtc, pipe_config); 10076 10077 ironlake_get_pfit_config(crtc, pipe_config); 10078 10079 ret = true; 10080 10081 out: 10082 intel_display_power_put(dev_priv, power_domain, wakeref); 10083 10084 return ret; 10085 } 10086 static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 10087 struct intel_crtc_state *crtc_state) 10088 { 10089 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10090 struct intel_atomic_state *state = 10091 to_intel_atomic_state(crtc_state->base.state); 10092 10093 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) || 10094 INTEL_GEN(dev_priv) >= 11) { 10095 struct intel_encoder *encoder = 10096 intel_get_crtc_new_encoder(state, crtc_state); 10097 10098 if (!intel_reserve_shared_dplls(state, crtc, encoder)) { 10099 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", 10100 pipe_name(crtc->pipe)); 10101 return -EINVAL; 10102 } 10103 } 10104 10105 return 0; 10106 } 10107 10108 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv, 10109 enum port port, 10110 struct intel_crtc_state *pipe_config) 10111 { 10112 enum intel_dpll_id id; 10113 u32 temp; 10114 10115 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); 10116 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); 10117 10118 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2)) 10119 return; 10120 10121 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10122 } 10123 10124 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, 10125 enum port port, 10126 struct intel_crtc_state *pipe_config) 10127 { 10128 enum phy phy = intel_port_to_phy(dev_priv, port); 10129 enum icl_port_dpll_id port_dpll_id; 10130 enum intel_dpll_id id; 10131 u32 temp; 10132 10133 if (intel_phy_is_combo(dev_priv, phy)) { 10134 temp = I915_READ(ICL_DPCLKA_CFGCR0) & 10135 ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); 10136 id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); 10137 port_dpll_id = ICL_PORT_DPLL_DEFAULT; 10138 } else if (intel_phy_is_tc(dev_priv, phy)) { 10139 u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; 10140 10141 if (clk_sel == DDI_CLK_SEL_MG) { 10142 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, 10143 port)); 10144 port_dpll_id = ICL_PORT_DPLL_MG_PHY; 10145 } else { 10146 WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162); 10147 id = DPLL_ID_ICL_TBTPLL; 10148 port_dpll_id = ICL_PORT_DPLL_DEFAULT; 10149 } 10150 } else { 10151 WARN(1, "Invalid port %x\n", port); 10152 return; 10153 } 10154 10155 pipe_config->icl_port_dplls[port_dpll_id].pll = 10156 intel_get_shared_dpll_by_id(dev_priv, id); 10157 10158 icl_set_active_port_dpll(pipe_config, port_dpll_id); 10159 } 10160 10161 static void
bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 10162 enum port port, 10163 struct intel_crtc_state *pipe_config) 10164 { 10165 enum intel_dpll_id id; 10166 10167 switch (port) { 10168 case PORT_A: 10169 id = DPLL_ID_SKL_DPLL0; 10170 break; 10171 case PORT_B: 10172 id = DPLL_ID_SKL_DPLL1; 10173 break; 10174 case PORT_C: 10175 id = DPLL_ID_SKL_DPLL2; 10176 break; 10177 default: 10178 DRM_ERROR("Incorrect port type\n"); 10179 return; 10180 } 10181 10182 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10183 } 10184 10185 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, 10186 enum port port, 10187 struct intel_crtc_state *pipe_config) 10188 { 10189 enum intel_dpll_id id; 10190 u32 temp; 10191 10192 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 10193 id = temp >> (port * 3 + 1); 10194 10195 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3)) 10196 return; 10197 10198 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10199 } 10200 10201 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 10202 enum port port, 10203 struct intel_crtc_state *pipe_config) 10204 { 10205 enum intel_dpll_id id; 10206 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 10207 10208 switch (ddi_pll_sel) { 10209 case PORT_CLK_SEL_WRPLL1: 10210 id = DPLL_ID_WRPLL1; 10211 break; 10212 case PORT_CLK_SEL_WRPLL2: 10213 id = DPLL_ID_WRPLL2; 10214 break; 10215 case PORT_CLK_SEL_SPLL: 10216 id = DPLL_ID_SPLL; 10217 break; 10218 case PORT_CLK_SEL_LCPLL_810: 10219 id = DPLL_ID_LCPLL_810; 10220 break; 10221 case PORT_CLK_SEL_LCPLL_1350: 10222 id = DPLL_ID_LCPLL_1350; 10223 break; 10224 case PORT_CLK_SEL_LCPLL_2700: 10225 id = DPLL_ID_LCPLL_2700; 10226 break; 10227 default: 10228 MISSING_CASE(ddi_pll_sel); 10229 /* fall through */ 10230 case PORT_CLK_SEL_NONE: 10231 return; 10232 } 10233 10234 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10235 } 10236 10237 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 10238 struct intel_crtc_state *pipe_config, 10239 u64 *power_domain_mask, 10240 intel_wakeref_t *wakerefs) 10241 { 10242 struct drm_device *dev = crtc->base.dev; 10243 struct drm_i915_private *dev_priv = to_i915(dev); 10244 enum intel_display_power_domain power_domain; 10245 unsigned long panel_transcoder_mask = 0; 10246 unsigned long enabled_panel_transcoders = 0; 10247 enum transcoder panel_transcoder; 10248 intel_wakeref_t wf; 10249 u32 tmp; 10250 10251 if (INTEL_GEN(dev_priv) >= 11) 10252 panel_transcoder_mask |= 10253 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 10254 10255 if (HAS_TRANSCODER_EDP(dev_priv)) 10256 panel_transcoder_mask |= BIT(TRANSCODER_EDP); 10257 10258 /* 10259 * The pipe->transcoder mapping is fixed with the exception of the eDP 10260 * and DSI transcoders handled below. 10261 */ 10262 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10263 10264 /* 10265 * XXX: Do intel_display_power_get_if_enabled before reading this (for 10266 * consistency and less surprising code; it's in always on power). 10267 */ 10268 for_each_set_bit(panel_transcoder, 10269 &panel_transcoder_mask, 10270 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) { 10271 bool force_thru = false; 10272 enum pipe trans_pipe; 10273 10274 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder)); 10275 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 10276 continue; 10277 10278 /* 10279 * Log all enabled ones, only use the first one. 10280 * 10281 * FIXME: This won't work for two separate DSI displays. 
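 * (With two DSI panels both transcoder bits end up set in
 * enabled_panel_transcoders, and the != BIT(panel_transcoder) check
 * below skips every transcoder after the first.)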
10282 */ 10283 enabled_panel_transcoders |= BIT(panel_transcoder); 10284 if (enabled_panel_transcoders != BIT(panel_transcoder)) 10285 continue; 10286 10287 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 10288 default: 10289 WARN(1, "unknown pipe linked to transcoder %s\n", 10290 transcoder_name(panel_transcoder)); 10291 /* fall through */ 10292 case TRANS_DDI_EDP_INPUT_A_ONOFF: 10293 force_thru = true; 10294 /* fall through */ 10295 case TRANS_DDI_EDP_INPUT_A_ON: 10296 trans_pipe = PIPE_A; 10297 break; 10298 case TRANS_DDI_EDP_INPUT_B_ONOFF: 10299 trans_pipe = PIPE_B; 10300 break; 10301 case TRANS_DDI_EDP_INPUT_C_ONOFF: 10302 trans_pipe = PIPE_C; 10303 break; 10304 } 10305 10306 if (trans_pipe == crtc->pipe) { 10307 pipe_config->cpu_transcoder = panel_transcoder; 10308 pipe_config->pch_pfit.force_thru = force_thru; 10309 } 10310 } 10311 10312 /* 10313 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 10314 */ 10315 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && 10316 enabled_panel_transcoders != BIT(TRANSCODER_EDP)); 10317 10318 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 10319 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10320 10321 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10322 if (!wf) 10323 return false; 10324 10325 wakerefs[power_domain] = wf; 10326 *power_domain_mask |= BIT_ULL(power_domain); 10327 10328 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10329 10330 return tmp & PIPECONF_ENABLE; 10331 } 10332 10333 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 10334 struct intel_crtc_state *pipe_config, 10335 u64 *power_domain_mask, 10336 intel_wakeref_t *wakerefs) 10337 { 10338 struct drm_device *dev = crtc->base.dev; 10339 struct drm_i915_private *dev_priv = to_i915(dev); 10340 enum intel_display_power_domain power_domain; 10341 enum transcoder cpu_transcoder; 10342 intel_wakeref_t wf; 10343 enum port port; 10344 u32 tmp; 10345 10346 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 10347 if (port == PORT_A) 10348 cpu_transcoder = TRANSCODER_DSI_A; 10349 else 10350 cpu_transcoder = TRANSCODER_DSI_C; 10351 10352 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10353 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10354 10355 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10356 if (!wf) 10357 continue; 10358 10359 wakerefs[power_domain] = wf; 10360 *power_domain_mask |= BIT_ULL(power_domain); 10361 10362 /* 10363 * The PLL needs to be enabled with a valid divider 10364 * configuration, otherwise accessing DSI registers will hang 10365 * the machine. See BSpec North Display Engine 10366 * registers/MIPI[BXT]. We can break out here early, since we 10367 * need the same DSI PLL to be enabled for both DSI ports. 
10368 */ 10369 if (!bxt_dsi_pll_is_enabled(dev_priv)) 10370 break; 10371 10372 /* XXX: this works for video mode only */ 10373 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port)); 10374 if (!(tmp & DPI_ENABLE)) 10375 continue; 10376 10377 tmp = I915_READ(MIPI_CTRL(port)); 10378 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 10379 continue; 10380 10381 pipe_config->cpu_transcoder = cpu_transcoder; 10382 break; 10383 } 10384 10385 return transcoder_is_dsi(pipe_config->cpu_transcoder); 10386 } 10387 10388 static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 10389 struct intel_crtc_state *pipe_config) 10390 { 10391 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10392 struct intel_shared_dpll *pll; 10393 enum port port; 10394 u32 tmp; 10395 10396 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 10397 10398 if (INTEL_GEN(dev_priv) >= 12) 10399 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 10400 else 10401 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 10402 10403 if (INTEL_GEN(dev_priv) >= 11) 10404 icelake_get_ddi_pll(dev_priv, port, pipe_config); 10405 else if (IS_CANNONLAKE(dev_priv)) 10406 cannonlake_get_ddi_pll(dev_priv, port, pipe_config); 10407 else if (IS_GEN9_BC(dev_priv)) 10408 skylake_get_ddi_pll(dev_priv, port, pipe_config); 10409 else if (IS_GEN9_LP(dev_priv)) 10410 bxt_get_ddi_pll(dev_priv, port, pipe_config); 10411 else 10412 haswell_get_ddi_pll(dev_priv, port, pipe_config); 10413 10414 pll = pipe_config->shared_dpll; 10415 if (pll) { 10416 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, 10417 &pipe_config->dpll_hw_state)); 10418 } 10419 10420 /* 10421 * Haswell has only FDI/PCH transcoder A, which is connected to 10422 * DDI E. So just check whether this pipe is wired to DDI E and whether 10423 * the PCH transcoder is on.
10424 */ 10425 if (INTEL_GEN(dev_priv) < 9 && 10426 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 10427 pipe_config->has_pch_encoder = true; 10428 10429 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 10430 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10431 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10432 10433 ironlake_get_fdi_m_n_config(crtc, pipe_config); 10434 } 10435 } 10436 10437 static bool haswell_get_pipe_config(struct intel_crtc *crtc, 10438 struct intel_crtc_state *pipe_config) 10439 { 10440 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10441 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf; 10442 enum intel_display_power_domain power_domain; 10443 u64 power_domain_mask; 10444 bool active; 10445 10446 intel_crtc_init_scalers(crtc, pipe_config); 10447 10448 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10449 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10450 if (!wf) 10451 return false; 10452 10453 wakerefs[power_domain] = wf; 10454 power_domain_mask = BIT_ULL(power_domain); 10455 10456 pipe_config->shared_dpll = NULL; 10457 10458 active = hsw_get_transcoder_state(crtc, pipe_config, 10459 &power_domain_mask, wakerefs); 10460 10461 if (IS_GEN9_LP(dev_priv) && 10462 bxt_get_dsi_transcoder_state(crtc, pipe_config, 10463 &power_domain_mask, wakerefs)) { 10464 WARN_ON(active); 10465 active = true; 10466 } 10467 10468 if (!active) 10469 goto out; 10470 10471 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 10472 INTEL_GEN(dev_priv) >= 11) { 10473 haswell_get_ddi_port_state(crtc, pipe_config); 10474 intel_get_pipe_timings(crtc, pipe_config); 10475 } 10476 10477 intel_get_pipe_src_size(crtc, pipe_config); 10478 10479 if (IS_HASWELL(dev_priv)) { 10480 u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10481 10482 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) 10483 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 10484 else 10485 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 10486 } else { 10487 pipe_config->output_format = 10488 bdw_get_pipemisc_output_format(crtc); 10489 10490 /* 10491 * Currently there is no interface defined to 10492 * check user preference between RGB/YCBCR444 10493 * or YCBCR420. So the only possible case for 10494 * YCBCR444 usage is driving YCBCR420 output 10495 * with LSPCON, when pipe is configured for 10496 * YCBCR444 output and LSPCON takes care of 10497 * downsampling it. 
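 * (So seeing YCBCR444 here is taken as implying LSPCON-based
 * downsampling, which is what the assignment below records; there is
 * no direct hardware readout for this.)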
10498 */ 10499 pipe_config->lspcon_downsampling = 10500 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444; 10501 } 10502 10503 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)); 10504 10505 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); 10506 10507 if (INTEL_GEN(dev_priv) >= 9) { 10508 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe)); 10509 10510 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) 10511 pipe_config->gamma_enable = true; 10512 10513 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) 10514 pipe_config->csc_enable = true; 10515 } else { 10516 i9xx_get_pipe_color_config(pipe_config); 10517 } 10518 10519 intel_color_get_config(pipe_config); 10520 10521 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 10522 WARN_ON(power_domain_mask & BIT_ULL(power_domain)); 10523 10524 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10525 if (wf) { 10526 wakerefs[power_domain] = wf; 10527 power_domain_mask |= BIT_ULL(power_domain); 10528 10529 if (INTEL_GEN(dev_priv) >= 9) 10530 skylake_get_pfit_config(crtc, pipe_config); 10531 else 10532 ironlake_get_pfit_config(crtc, pipe_config); 10533 } 10534 10535 if (hsw_crtc_supports_ips(crtc)) { 10536 if (IS_HASWELL(dev_priv)) 10537 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE; 10538 else { 10539 /* 10540 * We cannot read out the IPS state on Broadwell; set it 10541 * to true so we can force a defined state on the first 10542 * commit. 10543 */ 10544 pipe_config->ips_enabled = true; 10545 } 10546 } 10547 10548 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 10549 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 10550 pipe_config->pixel_multiplier = 10551 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 10552 } else { 10553 pipe_config->pixel_multiplier = 1; 10554 } 10555 10556 out: 10557 for_each_power_domain(power_domain, power_domain_mask) 10558 intel_display_power_put(dev_priv, 10559 power_domain, wakerefs[power_domain]); 10560 10561 return active; 10562 } 10563 10564 static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 10565 { 10566 struct drm_i915_private *dev_priv = 10567 to_i915(plane_state->base.plane->dev); 10568 const struct drm_framebuffer *fb = plane_state->base.fb; 10569 const struct drm_i915_gem_object *obj = intel_fb_obj(fb); 10570 u32 base; 10571 10572 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) 10573 base = obj->phys_handle->busaddr; 10574 else 10575 base = intel_plane_ggtt_offset(plane_state); 10576 10577 base += plane_state->color_plane[0].offset; 10578 10579 /* ILK+ do this automagically */ 10580 if (HAS_GMCH(dev_priv) && 10581 plane_state->base.rotation & DRM_MODE_ROTATE_180) 10582 base += (plane_state->base.crtc_h * 10583 plane_state->base.crtc_w - 1) * fb->format->cpp[0]; 10584 10585 return base; 10586 } 10587 10588 static u32 intel_cursor_position(const struct intel_plane_state *plane_state) 10589 { 10590 int x = plane_state->base.crtc_x; 10591 int y = plane_state->base.crtc_y; 10592 u32 pos = 0; 10593 10594 if (x < 0) { 10595 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 10596 x = -x; 10597 } 10598 pos |= x << CURSOR_X_SHIFT; 10599 10600 if (y < 0) { 10601 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 10602 y = -y; 10603 } 10604 pos |= y << CURSOR_Y_SHIFT; 10605 10606 return pos; 10607 } 10608 10609 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) 10610 { 10611 const struct drm_mode_config *config = 10612 &plane_state->base.plane->dev->mode_config; 10613 int width = plane_state->base.crtc_w; 10614 int height =
plane_state->base.crtc_h; 10615 10616 return width > 0 && width <= config->cursor_width && 10617 height > 0 && height <= config->cursor_height; 10618 } 10619 10620 static int intel_cursor_check_surface(struct intel_plane_state *plane_state) 10621 { 10622 int src_x, src_y; 10623 u32 offset; 10624 int ret; 10625 10626 ret = intel_plane_compute_gtt(plane_state); 10627 if (ret) 10628 return ret; 10629 10630 if (!plane_state->base.visible) 10631 return 0; 10632 10633 src_x = plane_state->base.src_x >> 16; 10634 src_y = plane_state->base.src_y >> 16; 10635 10636 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 10637 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 10638 plane_state, 0); 10639 10640 if (src_x != 0 || src_y != 0) { 10641 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n"); 10642 return -EINVAL; 10643 } 10644 10645 plane_state->color_plane[0].offset = offset; 10646 10647 return 0; 10648 } 10649 10650 static int intel_check_cursor(struct intel_crtc_state *crtc_state, 10651 struct intel_plane_state *plane_state) 10652 { 10653 const struct drm_framebuffer *fb = plane_state->base.fb; 10654 int ret; 10655 10656 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { 10657 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 10658 return -EINVAL; 10659 } 10660 10661 ret = drm_atomic_helper_check_plane_state(&plane_state->base, 10662 &crtc_state->base, 10663 DRM_PLANE_HELPER_NO_SCALING, 10664 DRM_PLANE_HELPER_NO_SCALING, 10665 true, true); 10666 if (ret) 10667 return ret; 10668 10669 ret = intel_cursor_check_surface(plane_state); 10670 if (ret) 10671 return ret; 10672 10673 if (!plane_state->base.visible) 10674 return 0; 10675 10676 ret = intel_plane_check_src_coordinates(plane_state); 10677 if (ret) 10678 return ret; 10679 10680 return 0; 10681 } 10682 10683 static unsigned int 10684 i845_cursor_max_stride(struct intel_plane *plane, 10685 u32 pixel_format, u64 modifier, 10686 unsigned int rotation) 10687 { 10688 return 2048; 10689 } 10690 10691 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 10692 { 10693 u32 cntl = 0; 10694 10695 if (crtc_state->gamma_enable) 10696 cntl |= CURSOR_GAMMA_ENABLE; 10697 10698 return cntl; 10699 } 10700 10701 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 10702 const struct intel_plane_state *plane_state) 10703 { 10704 return CURSOR_ENABLE | 10705 CURSOR_FORMAT_ARGB | 10706 CURSOR_STRIDE(plane_state->color_plane[0].stride); 10707 } 10708 10709 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 10710 { 10711 int width = plane_state->base.crtc_w; 10712 10713 /* 10714 * 845g/865g are limited only by the width of their cursors; 10715 * the height is arbitrary up to the precision of the register.
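 * For example, assuming the mode_config limits allow it, a 64x240
 * cursor is acceptable here, while 96x96 is rejected because 96 is
 * not a multiple of 64.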
10716 */ 10717 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 10718 } 10719 10720 static int i845_check_cursor(struct intel_crtc_state *crtc_state, 10721 struct intel_plane_state *plane_state) 10722 { 10723 const struct drm_framebuffer *fb = plane_state->base.fb; 10724 int ret; 10725 10726 ret = intel_check_cursor(crtc_state, plane_state); 10727 if (ret) 10728 return ret; 10729 10730 /* if we want to turn off the cursor ignore width and height */ 10731 if (!fb) 10732 return 0; 10733 10734 /* Check for which cursor types we support */ 10735 if (!i845_cursor_size_ok(plane_state)) { 10736 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 10737 plane_state->base.crtc_w, 10738 plane_state->base.crtc_h); 10739 return -EINVAL; 10740 } 10741 10742 WARN_ON(plane_state->base.visible && 10743 plane_state->color_plane[0].stride != fb->pitches[0]); 10744 10745 switch (fb->pitches[0]) { 10746 case 256: 10747 case 512: 10748 case 1024: 10749 case 2048: 10750 break; 10751 default: 10752 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n", 10753 fb->pitches[0]); 10754 return -EINVAL; 10755 } 10756 10757 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 10758 10759 return 0; 10760 } 10761 10762 static void i845_update_cursor(struct intel_plane *plane, 10763 const struct intel_crtc_state *crtc_state, 10764 const struct intel_plane_state *plane_state) 10765 { 10766 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 10767 u32 cntl = 0, base = 0, pos = 0, size = 0; 10768 unsigned long irqflags; 10769 10770 if (plane_state && plane_state->base.visible) { 10771 unsigned int width = plane_state->base.crtc_w; 10772 unsigned int height = plane_state->base.crtc_h; 10773 10774 cntl = plane_state->ctl | 10775 i845_cursor_ctl_crtc(crtc_state); 10776 10777 size = (height << 12) | width; 10778 10779 base = intel_cursor_base(plane_state); 10780 pos = intel_cursor_position(plane_state); 10781 } 10782 10783 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 10784 10785 /* On these chipsets we can only modify the base/size/stride 10786 * whilst the cursor is disabled. 
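 * Hence the sequence below: disable the cursor (CURCNTR = 0), update
 * base/size/position, then re-enable it with the new control value.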
10787 */ 10788 if (plane->cursor.base != base || 10789 plane->cursor.size != size || 10790 plane->cursor.cntl != cntl) { 10791 I915_WRITE_FW(CURCNTR(PIPE_A), 0); 10792 I915_WRITE_FW(CURBASE(PIPE_A), base); 10793 I915_WRITE_FW(CURSIZE, size); 10794 I915_WRITE_FW(CURPOS(PIPE_A), pos); 10795 I915_WRITE_FW(CURCNTR(PIPE_A), cntl); 10796 10797 plane->cursor.base = base; 10798 plane->cursor.size = size; 10799 plane->cursor.cntl = cntl; 10800 } else { 10801 I915_WRITE_FW(CURPOS(PIPE_A), pos); 10802 } 10803 10804 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 10805 } 10806 10807 static void i845_disable_cursor(struct intel_plane *plane, 10808 const struct intel_crtc_state *crtc_state) 10809 { 10810 i845_update_cursor(plane, crtc_state, NULL); 10811 } 10812 10813 static bool i845_cursor_get_hw_state(struct intel_plane *plane, 10814 enum pipe *pipe) 10815 { 10816 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 10817 enum intel_display_power_domain power_domain; 10818 intel_wakeref_t wakeref; 10819 bool ret; 10820 10821 power_domain = POWER_DOMAIN_PIPE(PIPE_A); 10822 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 10823 if (!wakeref) 10824 return false; 10825 10826 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 10827 10828 *pipe = PIPE_A; 10829 10830 intel_display_power_put(dev_priv, power_domain, wakeref); 10831 10832 return ret; 10833 } 10834 10835 static unsigned int 10836 i9xx_cursor_max_stride(struct intel_plane *plane, 10837 u32 pixel_format, u64 modifier, 10838 unsigned int rotation) 10839 { 10840 return plane->base.dev->mode_config.cursor_width * 4; 10841 } 10842 10843 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 10844 { 10845 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 10846 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10847 u32 cntl = 0; 10848 10849 if (INTEL_GEN(dev_priv) >= 11) 10850 return cntl; 10851 10852 if (crtc_state->gamma_enable) 10853 cntl = MCURSOR_GAMMA_ENABLE; 10854 10855 if (crtc_state->csc_enable) 10856 cntl |= MCURSOR_PIPE_CSC_ENABLE; 10857 10858 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 10859 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 10860 10861 return cntl; 10862 } 10863 10864 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 10865 const struct intel_plane_state *plane_state) 10866 { 10867 struct drm_i915_private *dev_priv = 10868 to_i915(plane_state->base.plane->dev); 10869 u32 cntl = 0; 10870 10871 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 10872 cntl |= MCURSOR_TRICKLE_FEED_DISABLE; 10873 10874 switch (plane_state->base.crtc_w) { 10875 case 64: 10876 cntl |= MCURSOR_MODE_64_ARGB_AX; 10877 break; 10878 case 128: 10879 cntl |= MCURSOR_MODE_128_ARGB_AX; 10880 break; 10881 case 256: 10882 cntl |= MCURSOR_MODE_256_ARGB_AX; 10883 break; 10884 default: 10885 MISSING_CASE(plane_state->base.crtc_w); 10886 return 0; 10887 } 10888 10889 if (plane_state->base.rotation & DRM_MODE_ROTATE_180) 10890 cntl |= MCURSOR_ROTATE_180; 10891 10892 return cntl; 10893 } 10894 10895 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) 10896 { 10897 struct drm_i915_private *dev_priv = 10898 to_i915(plane_state->base.plane->dev); 10899 int width = plane_state->base.crtc_w; 10900 int height = plane_state->base.crtc_h; 10901 10902 if (!intel_cursor_size_ok(plane_state)) 10903 return false; 10904 10905 /* Cursor width is limited to a few power-of-two sizes */ 10906 switch (width) { 10907 case 256: 10908 case 128: 10909 case 64: 
10910 break; 10911 default: 10912 return false; 10913 } 10914 10915 /* 10916 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor 10917 * height from 8 lines up to the cursor width, when the 10918 * cursor is not rotated. Everything else requires square 10919 * cursors. 10920 */ 10921 if (HAS_CUR_FBC(dev_priv) && 10922 plane_state->base.rotation & DRM_MODE_ROTATE_0) { 10923 if (height < 8 || height > width) 10924 return false; 10925 } else { 10926 if (height != width) 10927 return false; 10928 } 10929 10930 return true; 10931 } 10932 10933 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, 10934 struct intel_plane_state *plane_state) 10935 { 10936 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 10937 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 10938 const struct drm_framebuffer *fb = plane_state->base.fb; 10939 enum pipe pipe = plane->pipe; 10940 int ret; 10941 10942 ret = intel_check_cursor(crtc_state, plane_state); 10943 if (ret) 10944 return ret; 10945 10946 /* if we want to turn off the cursor ignore width and height */ 10947 if (!fb) 10948 return 0; 10949 10950 /* Check for which cursor types we support */ 10951 if (!i9xx_cursor_size_ok(plane_state)) { 10952 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 10953 plane_state->base.crtc_w, 10954 plane_state->base.crtc_h); 10955 return -EINVAL; 10956 } 10957 10958 WARN_ON(plane_state->base.visible && 10959 plane_state->color_plane[0].stride != fb->pitches[0]); 10960 10961 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) { 10962 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n", 10963 fb->pitches[0], plane_state->base.crtc_w); 10964 return -EINVAL; 10965 } 10966 10967 /* 10968 * There's something wrong with the cursor on CHV pipe C. 10969 * If it straddles the left edge of the screen then 10970 * moving it away from the edge or disabling it often 10971 * results in a pipe underrun, and often that can lead to 10972 * a dead pipe (constant underrun reported, and it scans 10973 * out just a solid color). To recover from that, the 10974 * display power well must be turned off and on again. 10975 * Refuse to put the cursor into that compromised position. 10976 */ 10977 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && 10978 plane_state->base.visible && plane_state->base.crtc_x < 0) { 10979 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); 10980 return -EINVAL; 10981 } 10982 10983 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); 10984 10985 return 0; 10986 } 10987 10988 static void i9xx_update_cursor(struct intel_plane *plane, 10989 const struct intel_crtc_state *crtc_state, 10990 const struct intel_plane_state *plane_state) 10991 { 10992 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 10993 enum pipe pipe = plane->pipe; 10994 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; 10995 unsigned long irqflags; 10996 10997 if (plane_state && plane_state->base.visible) { 10998 cntl = plane_state->ctl | 10999 i9xx_cursor_ctl_crtc(crtc_state); 11000 11001 if (plane_state->base.crtc_h != plane_state->base.crtc_w) 11002 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1); 11003 11004 base = intel_cursor_base(plane_state); 11005 pos = intel_cursor_position(plane_state); 11006 } 11007 11008 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11009 11010 /* 11011 * On some platforms writing CURCNTR first will also 11012 * cause CURPOS to be armed by the CURBASE write.
11013 * Without the CURCNTR write the CURPOS write would 11014 * arm itself. Thus we always update CURCNTR before 11015 * CURPOS. 11016 * 11017 * On other platforms CURPOS always requires the 11018 * CURBASE write to arm the update. Additionally 11019 * a write to any of the cursor registers will cancel 11020 * an already armed cursor update. Thus leaving out 11021 * the CURBASE write after CURPOS could lead to a 11022 * cursor that doesn't appear to move, or even change 11023 * shape. Thus we always write CURBASE. 11024 * 11025 * The other registers are armed by the CURBASE write 11026 * except when the plane is getting enabled at which time 11027 * the CURCNTR write arms the update. 11028 */ 11029 11030 if (INTEL_GEN(dev_priv) >= 9) 11031 skl_write_cursor_wm(plane, crtc_state); 11032 11033 if (plane->cursor.base != base || 11034 plane->cursor.size != fbc_ctl || 11035 plane->cursor.cntl != cntl) { 11036 if (HAS_CUR_FBC(dev_priv)) 11037 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl); 11038 I915_WRITE_FW(CURCNTR(pipe), cntl); 11039 I915_WRITE_FW(CURPOS(pipe), pos); 11040 I915_WRITE_FW(CURBASE(pipe), base); 11041 11042 plane->cursor.base = base; 11043 plane->cursor.size = fbc_ctl; 11044 plane->cursor.cntl = cntl; 11045 } else { 11046 I915_WRITE_FW(CURPOS(pipe), pos); 11047 I915_WRITE_FW(CURBASE(pipe), base); 11048 } 11049 11050 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11051 } 11052 11053 static void i9xx_disable_cursor(struct intel_plane *plane, 11054 const struct intel_crtc_state *crtc_state) 11055 { 11056 i9xx_update_cursor(plane, crtc_state, NULL); 11057 } 11058 11059 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, 11060 enum pipe *pipe) 11061 { 11062 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11063 enum intel_display_power_domain power_domain; 11064 intel_wakeref_t wakeref; 11065 bool ret; 11066 u32 val; 11067 11068 /* 11069 * Not 100% correct for planes that can move between pipes, 11070 * but that's only the case for gen2-3 which don't have any 11071 * display power wells.
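 * For those gen2-3 platforms the actual pipe is instead recovered
 * below from the MCURSOR_PIPE_SELECT bits read back from CURCNTR.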
11072 */ 11073 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 11074 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11075 if (!wakeref) 11076 return false; 11077 11078 val = I915_READ(CURCNTR(plane->pipe)); 11079 11080 ret = val & MCURSOR_MODE; 11081 11082 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 11083 *pipe = plane->pipe; 11084 else 11085 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> 11086 MCURSOR_PIPE_SELECT_SHIFT; 11087 11088 intel_display_power_put(dev_priv, power_domain, wakeref); 11089 11090 return ret; 11091 } 11092 11093 /* VESA 640x480x72Hz mode to set on the pipe */ 11094 static const struct drm_display_mode load_detect_mode = { 11095 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 11096 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 11097 }; 11098 11099 struct drm_framebuffer * 11100 intel_framebuffer_create(struct drm_i915_gem_object *obj, 11101 struct drm_mode_fb_cmd2 *mode_cmd) 11102 { 11103 struct intel_framebuffer *intel_fb; 11104 int ret; 11105 11106 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 11107 if (!intel_fb) 11108 return ERR_PTR(-ENOMEM); 11109 11110 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); 11111 if (ret) 11112 goto err; 11113 11114 return &intel_fb->base; 11115 11116 err: 11117 kfree(intel_fb); 11118 return ERR_PTR(ret); 11119 } 11120 11121 static int intel_modeset_disable_planes(struct drm_atomic_state *state, 11122 struct drm_crtc *crtc) 11123 { 11124 struct drm_plane *plane; 11125 struct drm_plane_state *plane_state; 11126 int ret, i; 11127 11128 ret = drm_atomic_add_affected_planes(state, crtc); 11129 if (ret) 11130 return ret; 11131 11132 for_each_new_plane_in_state(state, plane, plane_state, i) { 11133 if (plane_state->crtc != crtc) 11134 continue; 11135 11136 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 11137 if (ret) 11138 return ret; 11139 11140 drm_atomic_set_fb_for_plane(plane_state, NULL); 11141 } 11142 11143 return 0; 11144 } 11145 11146 int intel_get_load_detect_pipe(struct drm_connector *connector, 11147 const struct drm_display_mode *mode, 11148 struct intel_load_detect_pipe *old, 11149 struct drm_modeset_acquire_ctx *ctx) 11150 { 11151 struct intel_crtc *intel_crtc; 11152 struct intel_encoder *intel_encoder = 11153 intel_attached_encoder(connector); 11154 struct drm_crtc *possible_crtc; 11155 struct drm_encoder *encoder = &intel_encoder->base; 11156 struct drm_crtc *crtc = NULL; 11157 struct drm_device *dev = encoder->dev; 11158 struct drm_i915_private *dev_priv = to_i915(dev); 11159 struct drm_mode_config *config = &dev->mode_config; 11160 struct drm_atomic_state *state = NULL, *restore_state = NULL; 11161 struct drm_connector_state *connector_state; 11162 struct intel_crtc_state *crtc_state; 11163 int ret, i = -1; 11164 11165 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11166 connector->base.id, connector->name, 11167 encoder->base.id, encoder->name); 11168 11169 old->restore_state = NULL; 11170 11171 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 11172 11173 /* 11174 * Algorithm gets a little messy: 11175 * 11176 * - if the connector already has an assigned crtc, use it (but make 11177 * sure it's on first) 11178 * 11179 * - try to find the first unused crtc that can drive this connector, 11180 * and use that if we find one 11181 */ 11182 11183 /* See if we already have a CRTC for this connector */ 11184 if (connector->state->crtc) { 11185 crtc = connector->state->crtc; 11186 11187 ret = drm_modeset_lock(&crtc->mutex, 
ctx); 11188 if (ret) 11189 goto fail; 11190 11191 /* Make sure the crtc and connector are running */ 11192 goto found; 11193 } 11194 11195 /* Find an unused one (if possible) */ 11196 for_each_crtc(dev, possible_crtc) { 11197 i++; 11198 if (!(encoder->possible_crtcs & (1 << i))) 11199 continue; 11200 11201 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 11202 if (ret) 11203 goto fail; 11204 11205 if (possible_crtc->state->enable) { 11206 drm_modeset_unlock(&possible_crtc->mutex); 11207 continue; 11208 } 11209 11210 crtc = possible_crtc; 11211 break; 11212 } 11213 11214 /* 11215 * If we didn't find an unused CRTC, don't use any. 11216 */ 11217 if (!crtc) { 11218 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 11219 ret = -ENODEV; 11220 goto fail; 11221 } 11222 11223 found: 11224 intel_crtc = to_intel_crtc(crtc); 11225 11226 state = drm_atomic_state_alloc(dev); 11227 restore_state = drm_atomic_state_alloc(dev); 11228 if (!state || !restore_state) { 11229 ret = -ENOMEM; 11230 goto fail; 11231 } 11232 11233 state->acquire_ctx = ctx; 11234 restore_state->acquire_ctx = ctx; 11235 11236 connector_state = drm_atomic_get_connector_state(state, connector); 11237 if (IS_ERR(connector_state)) { 11238 ret = PTR_ERR(connector_state); 11239 goto fail; 11240 } 11241 11242 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 11243 if (ret) 11244 goto fail; 11245 11246 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 11247 if (IS_ERR(crtc_state)) { 11248 ret = PTR_ERR(crtc_state); 11249 goto fail; 11250 } 11251 11252 crtc_state->base.active = crtc_state->base.enable = true; 11253 11254 if (!mode) 11255 mode = &load_detect_mode; 11256 11257 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); 11258 if (ret) 11259 goto fail; 11260 11261 ret = intel_modeset_disable_planes(state, crtc); 11262 if (ret) 11263 goto fail; 11264 11265 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 11266 if (!ret) 11267 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 11268 if (!ret) 11269 ret = drm_atomic_add_affected_planes(restore_state, crtc); 11270 if (ret) { 11271 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); 11272 goto fail; 11273 } 11274 11275 ret = drm_atomic_commit(state); 11276 if (ret) { 11277 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 11278 goto fail; 11279 } 11280 11281 old->restore_state = restore_state; 11282 drm_atomic_state_put(state); 11283 11284 /* let the connector get through one full cycle before testing */ 11285 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 11286 return true; 11287 11288 fail: 11289 if (state) { 11290 drm_atomic_state_put(state); 11291 state = NULL; 11292 } 11293 if (restore_state) { 11294 drm_atomic_state_put(restore_state); 11295 restore_state = NULL; 11296 } 11297 11298 if (ret == -EDEADLK) 11299 return ret; 11300 11301 return false; 11302 } 11303 11304 void intel_release_load_detect_pipe(struct drm_connector *connector, 11305 struct intel_load_detect_pipe *old, 11306 struct drm_modeset_acquire_ctx *ctx) 11307 { 11308 struct intel_encoder *intel_encoder = 11309 intel_attached_encoder(connector); 11310 struct drm_encoder *encoder = &intel_encoder->base; 11311 struct drm_atomic_state *state = old->restore_state; 11312 int ret; 11313 11314 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11315 connector->base.id, connector->name, 11316 encoder->base.id, encoder->name); 11317 11318 if (!state) 11319 return; 11320 11321 ret = 
drm_atomic_helper_commit_duplicated_state(state, ctx); 11322 if (ret) 11323 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret); 11324 drm_atomic_state_put(state); 11325 } 11326 11327 static int i9xx_pll_refclk(struct drm_device *dev, 11328 const struct intel_crtc_state *pipe_config) 11329 { 11330 struct drm_i915_private *dev_priv = to_i915(dev); 11331 u32 dpll = pipe_config->dpll_hw_state.dpll; 11332 11333 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 11334 return dev_priv->vbt.lvds_ssc_freq; 11335 else if (HAS_PCH_SPLIT(dev_priv)) 11336 return 120000; 11337 else if (!IS_GEN(dev_priv, 2)) 11338 return 96000; 11339 else 11340 return 48000; 11341 } 11342 11343 /* Returns the clock of the currently programmed mode of the given pipe. */ 11344 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 11345 struct intel_crtc_state *pipe_config) 11346 { 11347 struct drm_device *dev = crtc->base.dev; 11348 struct drm_i915_private *dev_priv = to_i915(dev); 11349 enum pipe pipe = crtc->pipe; 11350 u32 dpll = pipe_config->dpll_hw_state.dpll; 11351 u32 fp; 11352 struct dpll clock; 11353 int port_clock; 11354 int refclk = i9xx_pll_refclk(dev, pipe_config); 11355 11356 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 11357 fp = pipe_config->dpll_hw_state.fp0; 11358 else 11359 fp = pipe_config->dpll_hw_state.fp1; 11360 11361 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 11362 if (IS_PINEVIEW(dev_priv)) { 11363 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 11364 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 11365 } else { 11366 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 11367 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 11368 } 11369 11370 if (!IS_GEN(dev_priv, 2)) { 11371 if (IS_PINEVIEW(dev_priv)) 11372 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 11373 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 11374 else 11375 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 11376 DPLL_FPA01_P1_POST_DIV_SHIFT); 11377 11378 switch (dpll & DPLL_MODE_MASK) { 11379 case DPLLB_MODE_DAC_SERIAL: 11380 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 11381 5 : 10; 11382 break; 11383 case DPLLB_MODE_LVDS: 11384 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 11385 7 : 14; 11386 break; 11387 default: 11388 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 11389 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 11390 return; 11391 } 11392 11393 if (IS_PINEVIEW(dev_priv)) 11394 port_clock = pnv_calc_dpll_params(refclk, &clock); 11395 else 11396 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11397 } else { 11398 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS); 11399 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 11400 11401 if (is_lvds) { 11402 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 11403 DPLL_FPA01_P1_POST_DIV_SHIFT); 11404 11405 if (lvds & LVDS_CLKB_POWER_UP) 11406 clock.p2 = 7; 11407 else 11408 clock.p2 = 14; 11409 } else { 11410 if (dpll & PLL_P1_DIVIDE_BY_TWO) 11411 clock.p1 = 2; 11412 else { 11413 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 11414 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 11415 } 11416 if (dpll & PLL_P2_DIVIDE_BY_4) 11417 clock.p2 = 4; 11418 else 11419 clock.p2 = 2; 11420 } 11421 11422 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11423 } 11424 11425 /* 11426 * This value includes pixel_multiplier. We will use 11427 * port_clock to compute adjusted_mode.crtc_clock in the 11428 * encoder's get_config() function. 
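 * As a worked example (illustrative numbers, not from any particular
 * platform): with refclk = 96000 kHz and m1 = 12, m2 = 9, n = 3,
 * p1 = 2, p2 = 10, i9xx_calc_dpll_params() computes
 * m = 5 * (m1 + 2) + (m2 + 2) = 81,
 * vco = refclk * m / (n + 2) = 1555200 kHz, and
 * dot = vco / (p1 * p2) = 77760 kHz.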
11429 */ 11430 pipe_config->port_clock = port_clock; 11431 } 11432 11433 int intel_dotclock_calculate(int link_freq, 11434 const struct intel_link_m_n *m_n) 11435 { 11436 /* 11437 * The calculation for the data clock is: 11438 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 11439 * But we want to avoid losing precision if possible, so: 11440 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 11441 * 11442 * and the dot clock computed here from the link M/N values is simpler: 11443 * dot_clock = (link_m * link_clock) / link_n 11444 * e.g. (illustrative numbers) link_m = 123456, link_n = 524288 and link_clock = 270000 kHz yield a dot clock of roughly 63578 kHz. */ 11445 11446 if (!m_n->link_n) 11447 return 0; 11448 11449 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); 11450 } 11451 11452 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 11453 struct intel_crtc_state *pipe_config) 11454 { 11455 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11456 11457 /* read out port_clock from the DPLL */ 11458 i9xx_crtc_clock_get(crtc, pipe_config); 11459 11460 /* 11461 * In case there is an active pipe without active ports, 11462 * we may need some idea for the dotclock anyway. 11463 * Calculate one based on the FDI configuration. 11464 */ 11465 pipe_config->base.adjusted_mode.crtc_clock = 11466 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 11467 &pipe_config->fdi_m_n); 11468 } 11469 11470 /* Returns the currently programmed mode of the given encoder. */ 11471 struct drm_display_mode * 11472 intel_encoder_current_mode(struct intel_encoder *encoder) 11473 { 11474 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 11475 struct intel_crtc_state *crtc_state; 11476 struct drm_display_mode *mode; 11477 struct intel_crtc *crtc; 11478 enum pipe pipe; 11479 11480 if (!encoder->get_hw_state(encoder, &pipe)) 11481 return NULL; 11482 11483 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 11484 11485 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 11486 if (!mode) 11487 return NULL; 11488 11489 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 11490 if (!crtc_state) { 11491 kfree(mode); 11492 return NULL; 11493 } 11494 11495 crtc_state->base.crtc = &crtc->base; 11496 11497 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) { 11498 kfree(crtc_state); 11499 kfree(mode); 11500 return NULL; 11501 } 11502 11503 encoder->get_config(encoder, crtc_state); 11504 11505 intel_mode_from_pipe_config(mode, crtc_state); 11506 11507 kfree(crtc_state); 11508 11509 return mode; 11510 } 11511 11512 static void intel_crtc_destroy(struct drm_crtc *crtc) 11513 { 11514 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11515 11516 drm_crtc_cleanup(crtc); 11517 kfree(intel_crtc); 11518 } 11519 11520 /** 11521 * intel_wm_need_update - Check whether watermarks need updating 11522 * @cur: current plane state 11523 * @new: new plane state 11524 * 11525 * Check the current plane state versus the new one to determine whether 11526 * watermarks need to be recalculated. 11527 * 11528 * Returns true if the watermarks need to be recalculated, false otherwise. 11529 */ 11530 static bool intel_wm_need_update(const struct intel_plane_state *cur, 11531 struct intel_plane_state *new) 11532 { 11533 /* Update watermarks on tiling or size changes.
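 * Specifically: any change in visibility forces an update, and when
 * both states have a fb, a change in tiling (modifier), rotation, or
 * source/destination size does as well.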
*/ 11534 if (new->base.visible != cur->base.visible) 11535 return true; 11536 11537 if (!cur->base.fb || !new->base.fb) 11538 return false; 11539 11540 if (cur->base.fb->modifier != new->base.fb->modifier || 11541 cur->base.rotation != new->base.rotation || 11542 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) || 11543 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) || 11544 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) || 11545 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst)) 11546 return true; 11547 11548 return false; 11549 } 11550 11551 static bool needs_scaling(const struct intel_plane_state *state) 11552 { 11553 int src_w = drm_rect_width(&state->base.src) >> 16; 11554 int src_h = drm_rect_height(&state->base.src) >> 16; 11555 int dst_w = drm_rect_width(&state->base.dst); 11556 int dst_h = drm_rect_height(&state->base.dst); 11557 11558 return (src_w != dst_w || src_h != dst_h); 11559 } 11560 11561 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, 11562 struct intel_crtc_state *crtc_state, 11563 const struct intel_plane_state *old_plane_state, 11564 struct intel_plane_state *plane_state) 11565 { 11566 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 11567 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 11568 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11569 bool mode_changed = needs_modeset(crtc_state); 11570 bool was_crtc_enabled = old_crtc_state->base.active; 11571 bool is_crtc_enabled = crtc_state->base.active; 11572 bool turn_off, turn_on, visible, was_visible; 11573 int ret; 11574 11575 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { 11576 ret = skl_update_scaler_plane(crtc_state, plane_state); 11577 if (ret) 11578 return ret; 11579 } 11580 11581 was_visible = old_plane_state->base.visible; 11582 visible = plane_state->base.visible; 11583 11584 if (!was_crtc_enabled && WARN_ON(was_visible)) 11585 was_visible = false; 11586 11587 /* 11588 * Visibility is calculated as if the crtc was on, but 11589 * after scaler setup everything depends on it being off 11590 * when the crtc isn't active. 11591 * 11592 * FIXME this is wrong for watermarks. Watermarks should also 11593 * be computed as if the pipe would be active. Perhaps move 11594 * per-plane wm computation to the .check_plane() hook, and 11595 * only combine the results from all planes in the current place? 
11596 */ 11597 if (!is_crtc_enabled) { 11598 plane_state->base.visible = visible = false; 11599 crtc_state->active_planes &= ~BIT(plane->id); 11600 crtc_state->data_rate[plane->id] = 0; 11601 } 11602 11603 if (!was_visible && !visible) 11604 return 0; 11605 11606 turn_off = was_visible && (!visible || mode_changed); 11607 turn_on = visible && (!was_visible || mode_changed); 11608 11609 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", 11610 crtc->base.base.id, crtc->base.name, 11611 plane->base.base.id, plane->base.name, 11612 was_visible, visible, 11613 turn_off, turn_on, mode_changed); 11614 11615 if (turn_on) { 11616 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 11617 crtc_state->update_wm_pre = true; 11618 11619 /* must disable cxsr around plane enable/disable */ 11620 if (plane->id != PLANE_CURSOR) 11621 crtc_state->disable_cxsr = true; 11622 } else if (turn_off) { 11623 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 11624 crtc_state->update_wm_post = true; 11625 11626 /* must disable cxsr around plane enable/disable */ 11627 if (plane->id != PLANE_CURSOR) 11628 crtc_state->disable_cxsr = true; 11629 } else if (intel_wm_need_update(old_plane_state, plane_state)) { 11630 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { 11631 /* FIXME bollocks */ 11632 crtc_state->update_wm_pre = true; 11633 crtc_state->update_wm_post = true; 11634 } 11635 } 11636 11637 if (visible || was_visible) 11638 crtc_state->fb_bits |= plane->frontbuffer_bit; 11639 11640 /* 11641 * ILK/SNB DVSACNTR/Sprite Enable 11642 * IVB SPR_CTL/Sprite Enable 11643 * "When in Self Refresh Big FIFO mode, a write to enable the 11644 * plane will be internally buffered and delayed while Big FIFO 11645 * mode is exiting." 11646 * 11647 * Which means that enabling the sprite can take an extra frame 11648 * when we start in big FIFO mode (LP1+). Thus we need to drop 11649 * down to LP0 and wait for vblank in order to make sure the 11650 * sprite gets enabled on the next vblank after the register write. 11651 * Doing otherwise would risk enabling the sprite one frame after 11652 * we've already signalled flip completion. We can resume LP1+ 11653 * once the sprite has been enabled. 11654 * 11655 * 11656 * WaCxSRDisabledForSpriteScaling:ivb 11657 * IVB SPR_SCALE/Scaling Enable 11658 * "Low Power watermarks must be disabled for at least one 11659 * frame before enabling sprite scaling, and kept disabled 11660 * until sprite scaling is disabled." 11661 * 11662 * ILK/SNB DVSASCALE/Scaling Enable 11663 * "When in Self Refresh Big FIFO mode, scaling enable will be 11664 * masked off while Big FIFO mode is exiting." 11665 * 11666 * Despite the w/a only being listed for IVB we assume that 11667 * the ILK/SNB note has similar ramifications, hence we apply 11668 * the w/a on all three platforms. 11669 * 11670 * Experimental results suggest this is needed also for the primary 11671 * plane, not only the sprite plane.
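 * Concretely, disable_lp_wm is set below for ILK/SNB/IVB whenever a
 * non-cursor plane is turned on, or starts being scaled when it
 * previously was not.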
11672 */ 11673 if (plane->id != PLANE_CURSOR && 11674 (IS_GEN_RANGE(dev_priv, 5, 6) || 11675 IS_IVYBRIDGE(dev_priv)) && 11676 (turn_on || (!needs_scaling(old_plane_state) && 11677 needs_scaling(plane_state)))) 11678 crtc_state->disable_lp_wm = true; 11679 11680 return 0; 11681 } 11682 11683 static bool encoders_cloneable(const struct intel_encoder *a, 11684 const struct intel_encoder *b) 11685 { 11686 /* masks could be asymmetric, so check both ways */ 11687 return a == b || (a->cloneable & (1 << b->type) && 11688 b->cloneable & (1 << a->type)); 11689 } 11690 11691 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 11692 struct intel_crtc *crtc, 11693 struct intel_encoder *encoder) 11694 { 11695 struct intel_encoder *source_encoder; 11696 struct drm_connector *connector; 11697 struct drm_connector_state *connector_state; 11698 int i; 11699 11700 for_each_new_connector_in_state(state, connector, connector_state, i) { 11701 if (connector_state->crtc != &crtc->base) 11702 continue; 11703 11704 source_encoder = 11705 to_intel_encoder(connector_state->best_encoder); 11706 if (!encoders_cloneable(encoder, source_encoder)) 11707 return false; 11708 } 11709 11710 return true; 11711 } 11712 11713 static int icl_add_linked_planes(struct intel_atomic_state *state) 11714 { 11715 struct intel_plane *plane, *linked; 11716 struct intel_plane_state *plane_state, *linked_plane_state; 11717 int i; 11718 11719 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11720 linked = plane_state->planar_linked_plane; 11721 11722 if (!linked) 11723 continue; 11724 11725 linked_plane_state = intel_atomic_get_plane_state(state, linked); 11726 if (IS_ERR(linked_plane_state)) 11727 return PTR_ERR(linked_plane_state); 11728 11729 WARN_ON(linked_plane_state->planar_linked_plane != plane); 11730 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave); 11731 } 11732 11733 return 0; 11734 } 11735 11736 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) 11737 { 11738 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 11739 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11740 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); 11741 struct intel_plane *plane, *linked; 11742 struct intel_plane_state *plane_state; 11743 int i; 11744 11745 if (INTEL_GEN(dev_priv) < 11) 11746 return 0; 11747 11748 /* 11749 * Destroy all old plane links and make the slave plane invisible 11750 * in the crtc_state->active_planes mask. 
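 * The second loop below then rebuilds the links: each NV12 plane on
 * this pipe is paired with a currently unused Y plane, or the commit
 * is rejected with -EINVAL.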
11751 */ 11752 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11753 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 11754 continue; 11755 11756 plane_state->planar_linked_plane = NULL; 11757 if (plane_state->planar_slave && !plane_state->base.visible) { 11758 crtc_state->active_planes &= ~BIT(plane->id); 11759 crtc_state->update_planes |= BIT(plane->id); 11760 } 11761 11762 plane_state->planar_slave = false; 11763 } 11764 11765 if (!crtc_state->nv12_planes) 11766 return 0; 11767 11768 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 11769 struct intel_plane_state *linked_state = NULL; 11770 11771 if (plane->pipe != crtc->pipe || 11772 !(crtc_state->nv12_planes & BIT(plane->id))) 11773 continue; 11774 11775 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 11776 if (!icl_is_nv12_y_plane(linked->id)) 11777 continue; 11778 11779 if (crtc_state->active_planes & BIT(linked->id)) 11780 continue; 11781 11782 linked_state = intel_atomic_get_plane_state(state, linked); 11783 if (IS_ERR(linked_state)) 11784 return PTR_ERR(linked_state); 11785 11786 break; 11787 } 11788 11789 if (!linked_state) { 11790 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n", 11791 hweight8(crtc_state->nv12_planes)); 11792 11793 return -EINVAL; 11794 } 11795 11796 plane_state->planar_linked_plane = linked; 11797 11798 linked_state->planar_slave = true; 11799 linked_state->planar_linked_plane = plane; 11800 crtc_state->active_planes |= BIT(linked->id); 11801 crtc_state->update_planes |= BIT(linked->id); 11802 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); 11803 } 11804 11805 return 0; 11806 } 11807 11808 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 11809 { 11810 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 11811 struct intel_atomic_state *state = 11812 to_intel_atomic_state(new_crtc_state->base.state); 11813 const struct intel_crtc_state *old_crtc_state = 11814 intel_atomic_get_old_crtc_state(state, crtc); 11815 11816 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 11817 } 11818 11819 static int intel_crtc_atomic_check(struct drm_crtc *_crtc, 11820 struct drm_crtc_state *_crtc_state) 11821 { 11822 struct intel_crtc *crtc = to_intel_crtc(_crtc); 11823 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11824 struct intel_crtc_state *crtc_state = 11825 to_intel_crtc_state(_crtc_state); 11826 int ret; 11827 bool mode_changed = needs_modeset(crtc_state); 11828 11829 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && 11830 mode_changed && !crtc_state->base.active) 11831 crtc_state->update_wm_post = true; 11832 11833 if (mode_changed && crtc_state->base.enable && 11834 dev_priv->display.crtc_compute_clock && 11835 !WARN_ON(crtc_state->shared_dpll)) { 11836 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); 11837 if (ret) 11838 return ret; 11839 } 11840 11841 /* 11842 * May need to update pipe gamma enable bits 11843 * when C8 planes are getting enabled/disabled. 
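 * (c8_planes_changed() only fires when the crtc gains its first or
 * loses its last C8 plane, since only that transition affects the
 * pipe gamma enable bits.)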
11844 */ 11845 if (c8_planes_changed(crtc_state)) 11846 crtc_state->base.color_mgmt_changed = true; 11847 11848 if (mode_changed || crtc_state->update_pipe || 11849 crtc_state->base.color_mgmt_changed) { 11850 ret = intel_color_check(crtc_state); 11851 if (ret) 11852 return ret; 11853 } 11854 11855 ret = 0; 11856 if (dev_priv->display.compute_pipe_wm) { 11857 ret = dev_priv->display.compute_pipe_wm(crtc_state); 11858 if (ret) { 11859 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); 11860 return ret; 11861 } 11862 } 11863 11864 if (dev_priv->display.compute_intermediate_wm) { 11865 if (WARN_ON(!dev_priv->display.compute_pipe_wm)) 11866 return 0; 11867 11868 /* 11869 * Calculate 'intermediate' watermarks that satisfy both the 11870 * old state and the new state. We can program these 11871 * immediately. 11872 */ 11873 ret = dev_priv->display.compute_intermediate_wm(crtc_state); 11874 if (ret) { 11875 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 11876 return ret; 11877 } 11878 } 11879 11880 if (INTEL_GEN(dev_priv) >= 9) { 11881 if (mode_changed || crtc_state->update_pipe) 11882 ret = skl_update_scaler_crtc(crtc_state); 11883 11884 if (!ret) 11885 ret = icl_check_nv12_planes(crtc_state); 11886 if (!ret) 11887 ret = skl_check_pipe_max_pixel_rate(crtc, crtc_state); 11888 if (!ret) 11889 ret = intel_atomic_setup_scalers(dev_priv, crtc, 11890 crtc_state); 11891 } 11892 11893 if (HAS_IPS(dev_priv)) 11894 crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state); 11895 11896 return ret; 11897 } 11898 11899 static const struct drm_crtc_helper_funcs intel_helper_funcs = { 11900 .atomic_check = intel_crtc_atomic_check, 11901 }; 11902 11903 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 11904 { 11905 struct intel_connector *connector; 11906 struct drm_connector_list_iter conn_iter; 11907 11908 drm_connector_list_iter_begin(dev, &conn_iter); 11909 for_each_intel_connector_iter(connector, &conn_iter) { 11910 if (connector->base.state->crtc) 11911 drm_connector_put(&connector->base); 11912 11913 if (connector->base.encoder) { 11914 connector->base.state->best_encoder = 11915 connector->base.encoder; 11916 connector->base.state->crtc = 11917 connector->base.encoder->crtc; 11918 11919 drm_connector_get(&connector->base); 11920 } else { 11921 connector->base.state->best_encoder = NULL; 11922 connector->base.state->crtc = NULL; 11923 } 11924 } 11925 drm_connector_list_iter_end(&conn_iter); 11926 } 11927 11928 static int 11929 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 11930 struct intel_crtc_state *pipe_config) 11931 { 11932 struct drm_connector *connector = conn_state->connector; 11933 const struct drm_display_info *info = &connector->display_info; 11934 int bpp; 11935 11936 switch (conn_state->max_bpc) { 11937 case 6 ... 7: 11938 bpp = 6 * 3; 11939 break; 11940 case 8 ... 9: 11941 bpp = 8 * 3; 11942 break; 11943 case 10 ... 
11: 11944 bpp = 10 * 3; 11945 break; 11946 case 12: 11947 bpp = 12 * 3; 11948 break; 11949 default: 11950 return -EINVAL; 11951 } 11952 11953 if (bpp < pipe_config->pipe_bpp) { 11954 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " 11955 "EDID bpp %d, requested bpp %d, max platform bpp %d\n", 11956 connector->base.id, connector->name, 11957 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc, 11958 pipe_config->pipe_bpp); 11959 11960 pipe_config->pipe_bpp = bpp; 11961 } 11962 11963 return 0; 11964 } 11965 11966 static int 11967 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 11968 struct intel_crtc_state *pipe_config) 11969 { 11970 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11971 struct drm_atomic_state *state = pipe_config->base.state; 11972 struct drm_connector *connector; 11973 struct drm_connector_state *connector_state; 11974 int bpp, i; 11975 11976 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 11977 IS_CHERRYVIEW(dev_priv))) 11978 bpp = 10*3; 11979 else if (INTEL_GEN(dev_priv) >= 5) 11980 bpp = 12*3; 11981 else 11982 bpp = 8*3; 11983 11984 pipe_config->pipe_bpp = bpp; 11985 11986 /* Clamp display bpp to connector max bpp */ 11987 for_each_new_connector_in_state(state, connector, connector_state, i) { 11988 int ret; 11989 11990 if (connector_state->crtc != &crtc->base) 11991 continue; 11992 11993 ret = compute_sink_pipe_bpp(connector_state, pipe_config); 11994 if (ret) 11995 return ret; 11996 } 11997 11998 return 0; 11999 } 12000 12001 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 12002 { 12003 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 12004 "type: 0x%x flags: 0x%x\n", 12005 mode->crtc_clock, 12006 mode->crtc_hdisplay, mode->crtc_hsync_start, 12007 mode->crtc_hsync_end, mode->crtc_htotal, 12008 mode->crtc_vdisplay, mode->crtc_vsync_start, 12009 mode->crtc_vsync_end, mode->crtc_vtotal, 12010 mode->type, mode->flags); 12011 } 12012 12013 static inline void 12014 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, 12015 const char *id, unsigned int lane_count, 12016 const struct intel_link_m_n *m_n) 12017 { 12018 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 12019 id, lane_count, 12020 m_n->gmch_m, m_n->gmch_n, 12021 m_n->link_m, m_n->link_n, m_n->tu); 12022 } 12023 12024 static void 12025 intel_dump_infoframe(struct drm_i915_private *dev_priv, 12026 const union hdmi_infoframe *frame) 12027 { 12028 if ((drm_debug & DRM_UT_KMS) == 0) 12029 return; 12030 12031 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); 12032 } 12033 12034 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x 12035 12036 static const char * const output_type_str[] = { 12037 OUTPUT_TYPE(UNUSED), 12038 OUTPUT_TYPE(ANALOG), 12039 OUTPUT_TYPE(DVO), 12040 OUTPUT_TYPE(SDVO), 12041 OUTPUT_TYPE(LVDS), 12042 OUTPUT_TYPE(TVOUT), 12043 OUTPUT_TYPE(HDMI), 12044 OUTPUT_TYPE(DP), 12045 OUTPUT_TYPE(EDP), 12046 OUTPUT_TYPE(DSI), 12047 OUTPUT_TYPE(DDI), 12048 OUTPUT_TYPE(DP_MST), 12049 }; 12050 12051 #undef OUTPUT_TYPE 12052 12053 static void snprintf_output_types(char *buf, size_t len, 12054 unsigned int output_types) 12055 { 12056 char *str = buf; 12057 int i; 12058 12059 str[0] = '\0'; 12060 12061 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) { 12062 int r; 12063 12064 if ((output_types & BIT(i)) == 0) 12065 continue; 12066 12067 r = snprintf(str, len, "%s%s", 12068 str != buf ? 
"," : "", output_type_str[i]); 12069 if (r >= len) 12070 break; 12071 str += r; 12072 len -= r; 12073 12074 output_types &= ~BIT(i); 12075 } 12076 12077 WARN_ON_ONCE(output_types != 0); 12078 } 12079 12080 static const char * const output_format_str[] = { 12081 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", 12082 [INTEL_OUTPUT_FORMAT_RGB] = "RGB", 12083 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", 12084 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", 12085 }; 12086 12087 static const char *output_formats(enum intel_output_format format) 12088 { 12089 if (format >= ARRAY_SIZE(output_format_str)) 12090 format = INTEL_OUTPUT_FORMAT_INVALID; 12091 return output_format_str[format]; 12092 } 12093 12094 static void intel_dump_plane_state(const struct intel_plane_state *plane_state) 12095 { 12096 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 12097 const struct drm_framebuffer *fb = plane_state->base.fb; 12098 struct drm_format_name_buf format_name; 12099 12100 if (!fb) { 12101 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n", 12102 plane->base.base.id, plane->base.name, 12103 yesno(plane_state->base.visible)); 12104 return; 12105 } 12106 12107 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n", 12108 plane->base.base.id, plane->base.name, 12109 fb->base.id, fb->width, fb->height, 12110 drm_get_format_name(fb->format->format, &format_name), 12111 yesno(plane_state->base.visible)); 12112 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n", 12113 plane_state->base.rotation, plane_state->scaler_id); 12114 if (plane_state->base.visible) 12115 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", 12116 DRM_RECT_FP_ARG(&plane_state->base.src), 12117 DRM_RECT_ARG(&plane_state->base.dst)); 12118 } 12119 12120 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, 12121 struct intel_atomic_state *state, 12122 const char *context) 12123 { 12124 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); 12125 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12126 const struct intel_plane_state *plane_state; 12127 struct intel_plane *plane; 12128 char buf[64]; 12129 int i; 12130 12131 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n", 12132 crtc->base.base.id, crtc->base.name, 12133 yesno(pipe_config->base.enable), context); 12134 12135 if (!pipe_config->base.enable) 12136 goto dump_planes; 12137 12138 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); 12139 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n", 12140 yesno(pipe_config->base.active), 12141 buf, pipe_config->output_types, 12142 output_formats(pipe_config->output_format)); 12143 12144 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 12145 transcoder_name(pipe_config->cpu_transcoder), 12146 pipe_config->pipe_bpp, pipe_config->dither); 12147 12148 if (pipe_config->has_pch_encoder) 12149 intel_dump_m_n_config(pipe_config, "fdi", 12150 pipe_config->fdi_lanes, 12151 &pipe_config->fdi_m_n); 12152 12153 if (intel_crtc_has_dp_encoder(pipe_config)) { 12154 intel_dump_m_n_config(pipe_config, "dp m_n", 12155 pipe_config->lane_count, &pipe_config->dp_m_n); 12156 if (pipe_config->has_drrs) 12157 intel_dump_m_n_config(pipe_config, "dp m2_n2", 12158 pipe_config->lane_count, 12159 &pipe_config->dp_m2_n2); 12160 } 12161 12162 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", 12163 pipe_config->has_audio, pipe_config->has_infoframe, 12164 pipe_config->infoframes.enable); 12165 12166 if 
(pipe_config->infoframes.enable & 12167 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) 12168 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp); 12169 if (pipe_config->infoframes.enable & 12170 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) 12171 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); 12172 if (pipe_config->infoframes.enable & 12173 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) 12174 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd); 12175 if (pipe_config->infoframes.enable & 12176 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) 12177 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); 12178 12179 DRM_DEBUG_KMS("requested mode:\n"); 12180 drm_mode_debug_printmodeline(&pipe_config->base.mode); 12181 DRM_DEBUG_KMS("adjusted mode:\n"); 12182 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode); 12183 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode); 12184 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n", 12185 pipe_config->port_clock, 12186 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 12187 pipe_config->pixel_rate); 12188 12189 if (INTEL_GEN(dev_priv) >= 9) 12190 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 12191 crtc->num_scalers, 12192 pipe_config->scaler_state.scaler_users, 12193 pipe_config->scaler_state.scaler_id); 12194 12195 if (HAS_GMCH(dev_priv)) 12196 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 12197 pipe_config->gmch_pfit.control, 12198 pipe_config->gmch_pfit.pgm_ratios, 12199 pipe_config->gmch_pfit.lvds_border_bits); 12200 else 12201 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n", 12202 pipe_config->pch_pfit.pos, 12203 pipe_config->pch_pfit.size, 12204 enableddisabled(pipe_config->pch_pfit.enabled), 12205 yesno(pipe_config->pch_pfit.force_thru)); 12206 12207 DRM_DEBUG_KMS("ips: %i, double wide: %i\n", 12208 pipe_config->ips_enabled, pipe_config->double_wide); 12209 12210 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); 12211 12212 if (IS_CHERRYVIEW(dev_priv)) 12213 DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 12214 pipe_config->cgm_mode, pipe_config->gamma_mode, 12215 pipe_config->gamma_enable, pipe_config->csc_enable); 12216 else 12217 DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 12218 pipe_config->csc_mode, pipe_config->gamma_mode, 12219 pipe_config->gamma_enable, pipe_config->csc_enable); 12220 12221 dump_planes: 12222 if (!state) 12223 return; 12224 12225 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12226 if (plane->pipe == crtc->pipe) 12227 intel_dump_plane_state(plane_state); 12228 } 12229 } 12230 12231 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 12232 { 12233 struct drm_device *dev = state->base.dev; 12234 struct drm_connector *connector; 12235 struct drm_connector_list_iter conn_iter; 12236 unsigned int used_ports = 0; 12237 unsigned int used_mst_ports = 0; 12238 bool ret = true; 12239 12240 /* 12241 * Walk the connector list instead of the encoder 12242 * list to detect the problem on ddi platforms 12243 * where there's just one encoder per digital port. 
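 * E.g. on DDI platforms a DP connector and an HDMI connector can
 * share the same encoder/port; walking connectors lets the used_ports
 * mask below catch both being routed to that port at once.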
12244 */ 12245 drm_connector_list_iter_begin(dev, &conn_iter); 12246 drm_for_each_connector_iter(connector, &conn_iter) { 12247 struct drm_connector_state *connector_state; 12248 struct intel_encoder *encoder; 12249 12250 connector_state = 12251 drm_atomic_get_new_connector_state(&state->base, 12252 connector); 12253 if (!connector_state) 12254 connector_state = connector->state; 12255 12256 if (!connector_state->best_encoder) 12257 continue; 12258 12259 encoder = to_intel_encoder(connector_state->best_encoder); 12260 12261 WARN_ON(!connector_state->crtc); 12262 12263 switch (encoder->type) { 12264 unsigned int port_mask; 12265 case INTEL_OUTPUT_DDI: 12266 if (WARN_ON(!HAS_DDI(to_i915(dev)))) 12267 break; 12268 /* else, fall through */ 12269 case INTEL_OUTPUT_DP: 12270 case INTEL_OUTPUT_HDMI: 12271 case INTEL_OUTPUT_EDP: 12272 port_mask = 1 << encoder->port; 12273 12274 /* the same port mustn't appear more than once */ 12275 if (used_ports & port_mask) 12276 ret = false; 12277 12278 used_ports |= port_mask; 12279 break; 12280 case INTEL_OUTPUT_DP_MST: 12281 used_mst_ports |= 12282 1 << encoder->port; 12283 break; 12284 default: 12285 break; 12286 } 12287 } 12288 drm_connector_list_iter_end(&conn_iter); 12289 12290 /* can't mix MST and SST/HDMI on the same port */ 12291 if (used_ports & used_mst_ports) 12292 return false; 12293 12294 return ret; 12295 } 12296 12297 static int 12298 clear_intel_crtc_state(struct intel_crtc_state *crtc_state) 12299 { 12300 struct drm_i915_private *dev_priv = 12301 to_i915(crtc_state->base.crtc->dev); 12302 struct intel_crtc_state *saved_state; 12303 12304 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL); 12305 if (!saved_state) 12306 return -ENOMEM; 12307 12308 /* FIXME: before the switch to atomic started, a new pipe_config was 12309 * kzalloc'd. Code that depends on any field being zero should be 12310 * fixed, so that the crtc_state can be safely duplicated. For now, 12311 * only fields that are known not to cause problems are preserved. */ 12312 12313 saved_state->scaler_state = crtc_state->scaler_state; 12314 saved_state->shared_dpll = crtc_state->shared_dpll; 12315 saved_state->dpll_hw_state = crtc_state->dpll_hw_state; 12316 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, 12317 sizeof(saved_state->icl_port_dplls)); 12318 saved_state->crc_enabled = crtc_state->crc_enabled; 12319 if (IS_G4X(dev_priv) || 12320 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 12321 saved_state->wm = crtc_state->wm; 12322 12323 /* Keep base drm_crtc_state intact, only clear our extended struct */ 12324 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base)); 12325 memcpy(&crtc_state->base + 1, &saved_state->base + 1, 12326 sizeof(*crtc_state) - sizeof(crtc_state->base)); 12327 12328 kfree(saved_state); 12329 return 0; 12330 } 12331 12332 static int 12333 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) 12334 { 12335 struct drm_crtc *crtc = pipe_config->base.crtc; 12336 struct drm_atomic_state *state = pipe_config->base.state; 12337 struct intel_encoder *encoder; 12338 struct drm_connector *connector; 12339 struct drm_connector_state *connector_state; 12340 int base_bpp, ret; 12341 int i; 12342 bool retry = true; 12343 12344 ret = clear_intel_crtc_state(pipe_config); 12345 if (ret) 12346 return ret; 12347 12348 pipe_config->cpu_transcoder = 12349 (enum transcoder) to_intel_crtc(crtc)->pipe; 12350 12351 /* 12352 * Sanitize sync polarity flags based on requested ones.
If neither 12353 * positive nor negative polarity is requested, treat this as meaning 12354 * negative polarity. 12355 */ 12356 if (!(pipe_config->base.adjusted_mode.flags & 12357 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 12358 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 12359 12360 if (!(pipe_config->base.adjusted_mode.flags & 12361 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 12362 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 12363 12364 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc), 12365 pipe_config); 12366 if (ret) 12367 return ret; 12368 12369 base_bpp = pipe_config->pipe_bpp; 12370 12371 /* 12372 * Determine the real pipe dimensions. Note that stereo modes can 12373 * increase the actual pipe size due to the frame doubling and 12374 * insertion of additional space for blanks between the frame. This 12375 * is stored in the crtc timings. We use the requested mode to do this 12376 * computation to clearly distinguish it from the adjusted mode, which 12377 * can be changed by the connectors in the below retry loop. 12378 */ 12379 drm_mode_get_hv_timing(&pipe_config->base.mode, 12380 &pipe_config->pipe_src_w, 12381 &pipe_config->pipe_src_h); 12382 12383 for_each_new_connector_in_state(state, connector, connector_state, i) { 12384 if (connector_state->crtc != crtc) 12385 continue; 12386 12387 encoder = to_intel_encoder(connector_state->best_encoder); 12388 12389 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { 12390 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 12391 return -EINVAL; 12392 } 12393 12394 /* 12395 * Determine output_types before calling the .compute_config() 12396 * hooks so that the hooks can use this information safely. 12397 */ 12398 if (encoder->compute_output_type) 12399 pipe_config->output_types |= 12400 BIT(encoder->compute_output_type(encoder, pipe_config, 12401 connector_state)); 12402 else 12403 pipe_config->output_types |= BIT(encoder->type); 12404 } 12405 12406 encoder_retry: 12407 /* Ensure the port clock defaults are reset when retrying. */ 12408 pipe_config->port_clock = 0; 12409 pipe_config->pixel_multiplier = 1; 12410 12411 /* Fill in default crtc timings, allow encoders to overwrite them. */ 12412 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode, 12413 CRTC_STEREO_DOUBLE); 12414 12415 /* Pass our mode to the connectors and the CRTC to give them a chance to 12416 * adjust it according to limitations or connector properties, and also 12417 * a chance to reject the mode entirely. 12418 */ 12419 for_each_new_connector_in_state(state, connector, connector_state, i) { 12420 if (connector_state->crtc != crtc) 12421 continue; 12422 12423 encoder = to_intel_encoder(connector_state->best_encoder); 12424 ret = encoder->compute_config(encoder, pipe_config, 12425 connector_state); 12426 if (ret < 0) { 12427 if (ret != -EDEADLK) 12428 DRM_DEBUG_KMS("Encoder config failure: %d\n", 12429 ret); 12430 return ret; 12431 } 12432 } 12433 12434 /* Set default port clock if not overwritten by the encoder. Needs to be 12435 * done afterwards in case the encoder adjusts the mode.
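 * E.g. encoders such as SDVO may raise pixel_multiplier for low
 * dotclocks (an assumption about the encoder's compute_config()
 * behaviour), in which case the default port clock computed below is
 * the crtc clock times that multiplier.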
*/ 12436 if (!pipe_config->port_clock) 12437 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock 12438 * pipe_config->pixel_multiplier; 12439 12440 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 12441 if (ret == -EDEADLK) 12442 return ret; 12443 if (ret < 0) { 12444 DRM_DEBUG_KMS("CRTC fixup failed\n"); 12445 return ret; 12446 } 12447 12448 if (ret == RETRY) { 12449 if (WARN(!retry, "loop in pipe configuration computation\n")) 12450 return -EINVAL; 12451 12452 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); 12453 retry = false; 12454 goto encoder_retry; 12455 } 12456 12457 /* Dithering seems to not pass through bits correctly when it should, so 12458 * only enable it on 6bpc panels and when it's not a compliance 12459 * test requesting 6bpc video pattern. 12460 */ 12461 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) && 12462 !pipe_config->dither_force_disable; 12463 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 12464 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 12465 12466 return 0; 12467 } 12468 12469 bool intel_fuzzy_clock_check(int clock1, int clock2) 12470 { 12471 int diff; 12472 12473 if (clock1 == clock2) 12474 return true; 12475 12476 if (!clock1 || !clock2) 12477 return false; 12478 12479 diff = abs(clock1 - clock2); 12480 /* accept clocks that differ by no more than 5% of their sum, i.e. roughly 10% of either clock */ 12481 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 12482 return true; 12483 12484 return false; 12485 } 12486 12487 static bool 12488 intel_compare_m_n(unsigned int m, unsigned int n, 12489 unsigned int m2, unsigned int n2, 12490 bool exact) 12491 { 12492 if (m == m2 && n == n2) 12493 return true; 12494 12495 if (exact || !m || !n || !m2 || !n2) 12496 return false; 12497 12498 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 12499 12500 if (n > n2) { 12501 while (n > n2) { 12502 m2 <<= 1; 12503 n2 <<= 1; 12504 } 12505 } else if (n < n2) { 12506 while (n < n2) { 12507 m <<= 1; 12508 n <<= 1; 12509 } 12510 } 12511 12512 if (n != n2) 12513 return false; 12514 12515 return intel_fuzzy_clock_check(m, m2); 12516 } 12517 12518 static bool 12519 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 12520 const struct intel_link_m_n *m2_n2, 12521 bool exact) 12522 { 12523 return m_n->tu == m2_n2->tu && 12524 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 12525 m2_n2->gmch_m, m2_n2->gmch_n, exact) && 12526 intel_compare_m_n(m_n->link_m, m_n->link_n, 12527 m2_n2->link_m, m2_n2->link_n, exact); 12528 } 12529 12530 static bool 12531 intel_compare_infoframe(const union hdmi_infoframe *a, 12532 const union hdmi_infoframe *b) 12533 { 12534 return memcmp(a, b, sizeof(*a)) == 0; 12535 } 12536 12537 static void 12538 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, 12539 bool fastset, const char *name, 12540 const union hdmi_infoframe *a, 12541 const union hdmi_infoframe *b) 12542 { 12543 if (fastset) { 12544 if ((drm_debug & DRM_UT_KMS) == 0) 12545 return; 12546 12547 DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name); 12548 DRM_DEBUG_KMS("expected:\n"); 12549 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); 12550 DRM_DEBUG_KMS("found:\n"); 12551 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); 12552 } else { 12553 DRM_ERROR("mismatch in %s infoframe\n", name); 12554 DRM_ERROR("expected:\n"); 12555 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); 12556 DRM_ERROR("found:\n"); 12557 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); 12558 } 12559 } 12560 12561 static void __printf(3, 4) 12562 pipe_config_mismatch(bool fastset, const char *name, const char
*format, ...) 12563 { 12564 struct va_format vaf; 12565 va_list args; 12566 12567 va_start(args, format); 12568 vaf.fmt = format; 12569 vaf.va = &args; 12570 12571 if (fastset) 12572 DRM_DEBUG_KMS("fastset mismatch in %s %pV\n", name, &vaf); 12573 else 12574 DRM_ERROR("mismatch in %s %pV\n", name, &vaf); 12575 12576 va_end(args); 12577 } 12578 12579 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 12580 { 12581 if (i915_modparams.fastboot != -1) 12582 return i915_modparams.fastboot; 12583 12584 /* Enable fastboot by default on Skylake and newer */ 12585 if (INTEL_GEN(dev_priv) >= 9) 12586 return true; 12587 12588 /* Enable fastboot by default on VLV and CHV */ 12589 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 12590 return true; 12591 12592 /* Disabled by default on all others */ 12593 return false; 12594 } 12595 12596 static bool 12597 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 12598 const struct intel_crtc_state *pipe_config, 12599 bool fastset) 12600 { 12601 struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev); 12602 bool ret = true; 12603 u32 bp_gamma = 0; 12604 bool fixup_inherited = fastset && 12605 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) && 12606 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED); 12607 12608 if (fixup_inherited && !fastboot_enabled(dev_priv)) { 12609 DRM_DEBUG_KMS("initial modeset and fastboot not set\n"); 12610 ret = false; 12611 } 12612 12613 #define PIPE_CONF_CHECK_X(name) do { \ 12614 if (current_config->name != pipe_config->name) { \ 12615 pipe_config_mismatch(fastset, __stringify(name), \ 12616 "(expected 0x%08x, found 0x%08x)\n", \ 12617 current_config->name, \ 12618 pipe_config->name); \ 12619 ret = false; \ 12620 } \ 12621 } while (0) 12622 12623 #define PIPE_CONF_CHECK_I(name) do { \ 12624 if (current_config->name != pipe_config->name) { \ 12625 pipe_config_mismatch(fastset, __stringify(name), \ 12626 "(expected %i, found %i)\n", \ 12627 current_config->name, \ 12628 pipe_config->name); \ 12629 ret = false; \ 12630 } \ 12631 } while (0) 12632 12633 #define PIPE_CONF_CHECK_BOOL(name) do { \ 12634 if (current_config->name != pipe_config->name) { \ 12635 pipe_config_mismatch(fastset, __stringify(name), \ 12636 "(expected %s, found %s)\n", \ 12637 yesno(current_config->name), \ 12638 yesno(pipe_config->name)); \ 12639 ret = false; \ 12640 } \ 12641 } while (0) 12642 12643 /* 12644 * Checks state where we only read out the enabling, but not the entire 12645 * state itself (like full infoframes or ELD for audio). These states 12646 * require a full modeset on bootup to fix up. 
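 * has_audio is the canonical example below: whether audio is enabled
 * can be read back, but the ELD cannot, so a fastset cannot verify an
 * inherited state and a full modeset is forced instead.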
12647 */ 12648 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \ 12649 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ 12650 PIPE_CONF_CHECK_BOOL(name); \ 12651 } else { \ 12652 pipe_config_mismatch(fastset, __stringify(name), \ 12653 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \ 12654 yesno(current_config->name), \ 12655 yesno(pipe_config->name)); \ 12656 ret = false; \ 12657 } \ 12658 } while (0) 12659 12660 #define PIPE_CONF_CHECK_P(name) do { \ 12661 if (current_config->name != pipe_config->name) { \ 12662 pipe_config_mismatch(fastset, __stringify(name), \ 12663 "(expected %p, found %p)\n", \ 12664 current_config->name, \ 12665 pipe_config->name); \ 12666 ret = false; \ 12667 } \ 12668 } while (0) 12669 12670 #define PIPE_CONF_CHECK_M_N(name) do { \ 12671 if (!intel_compare_link_m_n(&current_config->name, \ 12672 &pipe_config->name,\ 12673 !fastset)) { \ 12674 pipe_config_mismatch(fastset, __stringify(name), \ 12675 "(expected tu %i gmch %i/%i link %i/%i, " \ 12676 "found tu %i, gmch %i/%i link %i/%i)\n", \ 12677 current_config->name.tu, \ 12678 current_config->name.gmch_m, \ 12679 current_config->name.gmch_n, \ 12680 current_config->name.link_m, \ 12681 current_config->name.link_n, \ 12682 pipe_config->name.tu, \ 12683 pipe_config->name.gmch_m, \ 12684 pipe_config->name.gmch_n, \ 12685 pipe_config->name.link_m, \ 12686 pipe_config->name.link_n); \ 12687 ret = false; \ 12688 } \ 12689 } while (0) 12690 12691 /* This is required for BDW+ where there is only one set of registers for 12692 * switching between high and low RR. 12693 * This macro can be used whenever a comparison has to be made between one 12694 * hw state and multiple sw state variables. 12695 */ 12696 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \ 12697 if (!intel_compare_link_m_n(&current_config->name, \ 12698 &pipe_config->name, !fastset) && \ 12699 !intel_compare_link_m_n(&current_config->alt_name, \ 12700 &pipe_config->name, !fastset)) { \ 12701 pipe_config_mismatch(fastset, __stringify(name), \ 12702 "(expected tu %i gmch %i/%i link %i/%i, " \ 12703 "or tu %i gmch %i/%i link %i/%i, " \ 12704 "found tu %i, gmch %i/%i link %i/%i)\n", \ 12705 current_config->name.tu, \ 12706 current_config->name.gmch_m, \ 12707 current_config->name.gmch_n, \ 12708 current_config->name.link_m, \ 12709 current_config->name.link_n, \ 12710 current_config->alt_name.tu, \ 12711 current_config->alt_name.gmch_m, \ 12712 current_config->alt_name.gmch_n, \ 12713 current_config->alt_name.link_m, \ 12714 current_config->alt_name.link_n, \ 12715 pipe_config->name.tu, \ 12716 pipe_config->name.gmch_m, \ 12717 pipe_config->name.gmch_n, \ 12718 pipe_config->name.link_m, \ 12719 pipe_config->name.link_n); \ 12720 ret = false; \ 12721 } \ 12722 } while (0) 12723 12724 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 12725 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 12726 pipe_config_mismatch(fastset, __stringify(name), \ 12727 "(%x) (expected %i, found %i)\n", \ 12728 (mask), \ 12729 current_config->name & (mask), \ 12730 pipe_config->name & (mask)); \ 12731 ret = false; \ 12732 } \ 12733 } while (0) 12734 12735 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \ 12736 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 12737 pipe_config_mismatch(fastset, __stringify(name), \ 12738 "(expected %i, found %i)\n", \ 12739 current_config->name, \ 12740 pipe_config->name); \ 12741 ret = false; \ 12742 } \ 12743 } while (0) 12744 12745 #define
PIPE_CONF_CHECK_INFOFRAME(name) do { \ 12746 if (!intel_compare_infoframe(&current_config->infoframes.name, \ 12747 &pipe_config->infoframes.name)) { \ 12748 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \ 12749 &current_config->infoframes.name, \ 12750 &pipe_config->infoframes.name); \ 12751 ret = false; \ 12752 } \ 12753 } while (0) 12754 12755 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \ 12756 if (current_config->name1 != pipe_config->name1) { \ 12757 pipe_config_mismatch(fastset, __stringify(name1), \ 12758 "(expected %i, found %i, won't compare lut values)\n", \ 12759 current_config->name1, \ 12760 pipe_config->name1); \ 12761 ret = false;\ 12762 } else { \ 12763 if (!intel_color_lut_equal(current_config->name2, \ 12764 pipe_config->name2, pipe_config->name1, \ 12765 bit_precision)) { \ 12766 pipe_config_mismatch(fastset, __stringify(name2), \ 12767 "hw_state doesn't match sw_state\n"); \ 12768 ret = false; \ 12769 } \ 12770 } \ 12771 } while (0) 12772 12773 #define PIPE_CONF_QUIRK(quirk) \ 12774 ((current_config->quirks | pipe_config->quirks) & (quirk)) 12775 12776 PIPE_CONF_CHECK_I(cpu_transcoder); 12777 12778 PIPE_CONF_CHECK_BOOL(has_pch_encoder); 12779 PIPE_CONF_CHECK_I(fdi_lanes); 12780 PIPE_CONF_CHECK_M_N(fdi_m_n); 12781 12782 PIPE_CONF_CHECK_I(lane_count); 12783 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 12784 12785 if (INTEL_GEN(dev_priv) < 8) { 12786 PIPE_CONF_CHECK_M_N(dp_m_n); 12787 12788 if (current_config->has_drrs) 12789 PIPE_CONF_CHECK_M_N(dp_m2_n2); 12790 } else 12791 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 12792 12793 PIPE_CONF_CHECK_X(output_types); 12794 12795 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 12796 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 12797 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); 12798 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end); 12799 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start); 12800 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end); 12801 12802 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay); 12803 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal); 12804 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start); 12805 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end); 12806 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start); 12807 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); 12808 12809 PIPE_CONF_CHECK_I(pixel_multiplier); 12810 PIPE_CONF_CHECK_I(output_format); 12811 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 12812 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 12813 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 12814 PIPE_CONF_CHECK_BOOL(limited_color_range); 12815 12816 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 12817 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 12818 PIPE_CONF_CHECK_BOOL(has_infoframe); 12819 PIPE_CONF_CHECK_BOOL(fec_enable); 12820 12821 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 12822 12823 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12824 DRM_MODE_FLAG_INTERLACE); 12825 12826 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 12827 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12828 DRM_MODE_FLAG_PHSYNC); 12829 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12830 DRM_MODE_FLAG_NHSYNC); 12831 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12832 DRM_MODE_FLAG_PVSYNC); 12833 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12834 DRM_MODE_FLAG_NVSYNC); 12835 } 12836 12837 PIPE_CONF_CHECK_X(gmch_pfit.control); 12838 /* pfit ratios are autocomputed by the hw on gen4+ */ 12839 if
(INTEL_GEN(dev_priv) < 4) 12840 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 12841 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 12842 12843 /* 12844 * Changing the EDP transcoder input mux 12845 * (A_ONOFF vs. A_ON) requires a full modeset. 12846 */ 12847 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 12848 12849 if (!fastset) { 12850 PIPE_CONF_CHECK_I(pipe_src_w); 12851 PIPE_CONF_CHECK_I(pipe_src_h); 12852 12853 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 12854 if (current_config->pch_pfit.enabled) { 12855 PIPE_CONF_CHECK_X(pch_pfit.pos); 12856 PIPE_CONF_CHECK_X(pch_pfit.size); 12857 } 12858 12859 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 12860 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 12861 12862 PIPE_CONF_CHECK_X(gamma_mode); 12863 if (IS_CHERRYVIEW(dev_priv)) 12864 PIPE_CONF_CHECK_X(cgm_mode); 12865 else 12866 PIPE_CONF_CHECK_X(csc_mode); 12867 PIPE_CONF_CHECK_BOOL(gamma_enable); 12868 PIPE_CONF_CHECK_BOOL(csc_enable); 12869 12870 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); 12871 if (bp_gamma) 12872 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma); 12873 12874 } 12875 12876 PIPE_CONF_CHECK_BOOL(double_wide); 12877 12878 PIPE_CONF_CHECK_P(shared_dpll); 12879 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 12880 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 12881 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 12882 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 12883 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 12884 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 12885 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 12886 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 12887 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 12888 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 12889 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 12890 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 12891 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 12892 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 12893 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 12894 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 12895 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 12896 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 12897 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 12898 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 12899 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 12900 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 12901 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 12902 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 12903 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 12904 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 12905 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 12906 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 12907 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 12908 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 12909 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 12910 12911 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 12912 PIPE_CONF_CHECK_X(dsi_pll.div); 12913 12914 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 12915 PIPE_CONF_CHECK_I(pipe_bpp); 12916 12917 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); 12918 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 12919 12920 PIPE_CONF_CHECK_I(min_voltage_level); 12921 12922 PIPE_CONF_CHECK_X(infoframes.enable); 12923 PIPE_CONF_CHECK_X(infoframes.gcp); 12924 PIPE_CONF_CHECK_INFOFRAME(avi); 12925 PIPE_CONF_CHECK_INFOFRAME(spd); 12926 PIPE_CONF_CHECK_INFOFRAME(hdmi); 12927 PIPE_CONF_CHECK_INFOFRAME(drm); 12928 12929 #undef PIPE_CONF_CHECK_X 12930 #undef PIPE_CONF_CHECK_I 12931 #undef PIPE_CONF_CHECK_BOOL 12932 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 12933 #undef PIPE_CONF_CHECK_P 12934 #undef PIPE_CONF_CHECK_FLAGS 12935 #undef 
PIPE_CONF_CHECK_CLOCK_FUZZY 12936 #undef PIPE_CONF_CHECK_COLOR_LUT 12937 #undef PIPE_CONF_QUIRK 12938 12939 return ret; 12940 } 12941 12942 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 12943 const struct intel_crtc_state *pipe_config) 12944 { 12945 if (pipe_config->has_pch_encoder) { 12946 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 12947 &pipe_config->fdi_m_n); 12948 int dotclock = pipe_config->base.adjusted_mode.crtc_clock; 12949 12950 /* 12951 * FDI already provided one idea for the dotclock. 12952 * Yell if the encoder disagrees. 12953 */ 12954 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock), 12955 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 12956 fdi_dotclock, dotclock); 12957 } 12958 } 12959 12960 static void verify_wm_state(struct intel_crtc *crtc, 12961 struct intel_crtc_state *new_crtc_state) 12962 { 12963 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12964 struct skl_hw_state { 12965 struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; 12966 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; 12967 struct skl_ddb_allocation ddb; 12968 struct skl_pipe_wm wm; 12969 } *hw; 12970 struct skl_ddb_allocation *sw_ddb; 12971 struct skl_pipe_wm *sw_wm; 12972 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 12973 const enum pipe pipe = crtc->pipe; 12974 int plane, level, max_level = ilk_wm_max_level(dev_priv); 12975 12976 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active) 12977 return; 12978 12979 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 12980 if (!hw) 12981 return; 12982 12983 skl_pipe_wm_get_hw_state(crtc, &hw->wm); 12984 sw_wm = &new_crtc_state->wm.skl.optimal; 12985 12986 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); 12987 12988 skl_ddb_get_hw_state(dev_priv, &hw->ddb); 12989 sw_ddb = &dev_priv->wm.skl_hw.ddb; 12990 12991 if (INTEL_GEN(dev_priv) >= 11 && 12992 hw->ddb.enabled_slices != sw_ddb->enabled_slices) 12993 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n", 12994 sw_ddb->enabled_slices, 12995 hw->ddb.enabled_slices); 12996 12997 /* planes */ 12998 for_each_universal_plane(dev_priv, pipe, plane) { 12999 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 13000 13001 hw_plane_wm = &hw->wm.planes[plane]; 13002 sw_plane_wm = &sw_wm->planes[plane]; 13003 13004 /* Watermarks */ 13005 for (level = 0; level <= max_level; level++) { 13006 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 13007 &sw_plane_wm->wm[level])) 13008 continue; 13009 13010 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13011 pipe_name(pipe), plane + 1, level, 13012 sw_plane_wm->wm[level].plane_en, 13013 sw_plane_wm->wm[level].plane_res_b, 13014 sw_plane_wm->wm[level].plane_res_l, 13015 hw_plane_wm->wm[level].plane_en, 13016 hw_plane_wm->wm[level].plane_res_b, 13017 hw_plane_wm->wm[level].plane_res_l); 13018 } 13019 13020 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 13021 &sw_plane_wm->trans_wm)) { 13022 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13023 pipe_name(pipe), plane + 1, 13024 sw_plane_wm->trans_wm.plane_en, 13025 sw_plane_wm->trans_wm.plane_res_b, 13026 sw_plane_wm->trans_wm.plane_res_l, 13027 hw_plane_wm->trans_wm.plane_en, 13028 hw_plane_wm->trans_wm.plane_res_b, 13029 hw_plane_wm->trans_wm.plane_res_l); 13030 } 13031 13032 /* DDB */ 13033 hw_ddb_entry = &hw->ddb_y[plane]; 13034 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane]; 13035 13036 if 
(!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 13037 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", 13038 pipe_name(pipe), plane + 1, 13039 sw_ddb_entry->start, sw_ddb_entry->end, 13040 hw_ddb_entry->start, hw_ddb_entry->end); 13041 } 13042 } 13043 13044 /* 13045 * cursor 13046 * If the cursor plane isn't active, we may not have updated its ddb 13047 * allocation. In that case since the ddb allocation will be updated 13048 * once the plane becomes visible, we can skip this check. 13049 */ 13050 if (1) { 13051 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 13052 13053 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR]; 13054 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; 13055 13056 /* Watermarks */ 13057 for (level = 0; level <= max_level; level++) { 13058 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 13059 &sw_plane_wm->wm[level])) 13060 continue; 13061 13062 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13063 pipe_name(pipe), level, 13064 sw_plane_wm->wm[level].plane_en, 13065 sw_plane_wm->wm[level].plane_res_b, 13066 sw_plane_wm->wm[level].plane_res_l, 13067 hw_plane_wm->wm[level].plane_en, 13068 hw_plane_wm->wm[level].plane_res_b, 13069 hw_plane_wm->wm[level].plane_res_l); 13070 } 13071 13072 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 13073 &sw_plane_wm->trans_wm)) { 13074 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13075 pipe_name(pipe), 13076 sw_plane_wm->trans_wm.plane_en, 13077 sw_plane_wm->trans_wm.plane_res_b, 13078 sw_plane_wm->trans_wm.plane_res_l, 13079 hw_plane_wm->trans_wm.plane_en, 13080 hw_plane_wm->trans_wm.plane_res_b, 13081 hw_plane_wm->trans_wm.plane_res_l); 13082 } 13083 13084 /* DDB */ 13085 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; 13086 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; 13087 13088 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 13089 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", 13090 pipe_name(pipe), 13091 sw_ddb_entry->start, sw_ddb_entry->end, 13092 hw_ddb_entry->start, hw_ddb_entry->end); 13093 } 13094 } 13095 13096 kfree(hw); 13097 } 13098 13099 static void 13100 verify_connector_state(struct intel_atomic_state *state, 13101 struct intel_crtc *crtc) 13102 { 13103 struct drm_connector *connector; 13104 struct drm_connector_state *new_conn_state; 13105 int i; 13106 13107 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { 13108 struct drm_encoder *encoder = connector->encoder; 13109 struct intel_crtc_state *crtc_state = NULL; 13110 13111 if (new_conn_state->crtc != &crtc->base) 13112 continue; 13113 13114 if (crtc) 13115 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 13116 13117 intel_connector_verify_state(crtc_state, new_conn_state); 13118 13119 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 13120 "connector's atomic encoder doesn't match legacy encoder\n"); 13121 } 13122 } 13123 13124 static void 13125 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) 13126 { 13127 struct intel_encoder *encoder; 13128 struct drm_connector *connector; 13129 struct drm_connector_state *old_conn_state, *new_conn_state; 13130 int i; 13131 13132 for_each_intel_encoder(&dev_priv->drm, encoder) { 13133 bool enabled = false, found = false; 13134 enum pipe pipe; 13135 13136 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 13137 encoder->base.base.id, 13138 encoder->base.name); 13139
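/* 'found' records that some connector used this encoder in either the old or the new state; 'enabled' that a connector still uses it in the new state. */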
13140 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, 13141 new_conn_state, i) { 13142 if (old_conn_state->best_encoder == &encoder->base) 13143 found = true; 13144 13145 if (new_conn_state->best_encoder != &encoder->base) 13146 continue; 13147 found = enabled = true; 13148 13149 I915_STATE_WARN(new_conn_state->crtc != 13150 encoder->base.crtc, 13151 "connector's crtc doesn't match encoder crtc\n"); 13152 } 13153 13154 if (!found) 13155 continue; 13156 13157 I915_STATE_WARN(!!encoder->base.crtc != enabled, 13158 "encoder's enabled state mismatch " 13159 "(expected %i, found %i)\n", 13160 !!encoder->base.crtc, enabled); 13161 13162 if (!encoder->base.crtc) { 13163 bool active; 13164 13165 active = encoder->get_hw_state(encoder, &pipe); 13166 I915_STATE_WARN(active, 13167 "encoder detached but still enabled on pipe %c.\n", 13168 pipe_name(pipe)); 13169 } 13170 } 13171 } 13172 13173 static void 13174 verify_crtc_state(struct intel_crtc *crtc, 13175 struct intel_crtc_state *old_crtc_state, 13176 struct intel_crtc_state *new_crtc_state) 13177 { 13178 struct drm_device *dev = crtc->base.dev; 13179 struct drm_i915_private *dev_priv = to_i915(dev); 13180 struct intel_encoder *encoder; 13181 struct intel_crtc_state *pipe_config; 13182 struct drm_atomic_state *state; 13183 bool active; 13184 13185 state = old_crtc_state->base.state; 13186 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base); 13187 pipe_config = old_crtc_state; 13188 memset(pipe_config, 0, sizeof(*pipe_config)); 13189 pipe_config->base.crtc = &crtc->base; 13190 pipe_config->base.state = state; 13191 13192 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); 13193 13194 active = dev_priv->display.get_pipe_config(crtc, pipe_config); 13195 13196 /* we keep both pipes enabled on 830 */ 13197 if (IS_I830(dev_priv)) 13198 active = new_crtc_state->base.active; 13199 13200 I915_STATE_WARN(new_crtc_state->base.active != active, 13201 "crtc active state doesn't match with hw state " 13202 "(expected %i, found %i)\n", new_crtc_state->base.active, active); 13203 13204 I915_STATE_WARN(crtc->active != new_crtc_state->base.active, 13205 "transitional active state does not match atomic hw state " 13206 "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active); 13207 13208 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 13209 enum pipe pipe; 13210 13211 active = encoder->get_hw_state(encoder, &pipe); 13212 I915_STATE_WARN(active != new_crtc_state->base.active, 13213 "[ENCODER:%i] active %i with crtc active %i\n", 13214 encoder->base.base.id, active, new_crtc_state->base.active); 13215 13216 I915_STATE_WARN(active && crtc->pipe != pipe, 13217 "Encoder connected to wrong pipe %c\n", 13218 pipe_name(pipe)); 13219 13220 if (active) 13221 encoder->get_config(encoder, pipe_config); 13222 } 13223 13224 intel_crtc_compute_pixel_rate(pipe_config); 13225 13226 if (!new_crtc_state->base.active) 13227 return; 13228 13229 intel_pipe_config_sanity_check(dev_priv, pipe_config); 13230 13231 if (!intel_pipe_config_compare(new_crtc_state, 13232 pipe_config, false)) { 13233 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 13234 intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); 13235 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]"); 13236 } 13237 } 13238 13239 static void 13240 intel_verify_planes(struct intel_atomic_state *state) 13241 { 13242 struct intel_plane *plane; 13243 const struct intel_plane_state *plane_state; 13244 int i; 13245 13246 
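/* A planar slave plane is enabled in hardware even though it is not visible in the uapi state (it is linked to another plane to scan out a planar YUV framebuffer), so accept either condition below. */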
for_each_new_intel_plane_in_state(state, plane, 13247 plane_state, i) 13248 assert_plane(plane, plane_state->planar_slave || 13249 plane_state->base.visible); 13250 } 13251 13252 static void 13253 verify_single_dpll_state(struct drm_i915_private *dev_priv, 13254 struct intel_shared_dpll *pll, 13255 struct intel_crtc *crtc, 13256 struct intel_crtc_state *new_crtc_state) 13257 { 13258 struct intel_dpll_hw_state dpll_hw_state; 13259 unsigned int crtc_mask; 13260 bool active; 13261 13262 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 13263 13264 DRM_DEBUG_KMS("%s\n", pll->info->name); 13265 13266 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state); 13267 13268 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { 13269 I915_STATE_WARN(!pll->on && pll->active_mask, 13270 "pll in active use but not on in sw tracking\n"); 13271 I915_STATE_WARN(pll->on && !pll->active_mask, 13272 "pll is on but not used by any active crtc\n"); 13273 I915_STATE_WARN(pll->on != active, 13274 "pll on state mismatch (expected %i, found %i)\n", 13275 pll->on, active); 13276 } 13277 13278 if (!crtc) { 13279 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, 13280 "more active pll users than references: %x vs %x\n", 13281 pll->active_mask, pll->state.crtc_mask); 13282 13283 return; 13284 } 13285 13286 crtc_mask = drm_crtc_mask(&crtc->base); 13287 13288 if (new_crtc_state->base.active) 13289 I915_STATE_WARN(!(pll->active_mask & crtc_mask), 13290 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", 13291 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 13292 else 13293 I915_STATE_WARN(pll->active_mask & crtc_mask, 13294 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", 13295 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 13296 13297 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), 13298 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", 13299 crtc_mask, pll->state.crtc_mask); 13300 13301 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 13302 &dpll_hw_state, 13303 sizeof(dpll_hw_state)), 13304 "pll hw state mismatch\n"); 13305 } 13306 13307 static void 13308 verify_shared_dpll_state(struct intel_crtc *crtc, 13309 struct intel_crtc_state *old_crtc_state, 13310 struct intel_crtc_state *new_crtc_state) 13311 { 13312 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13313 13314 if (new_crtc_state->shared_dpll) 13315 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state); 13316 13317 if (old_crtc_state->shared_dpll && 13318 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { 13319 unsigned int crtc_mask = drm_crtc_mask(&crtc->base); 13320 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; 13321 13322 I915_STATE_WARN(pll->active_mask & crtc_mask, 13323 "pll active mismatch (didn't expect pipe %c in active mask)\n", 13324 pipe_name(drm_crtc_index(&crtc->base))); 13325 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, 13326 "pll enabled crtcs mismatch (found pipe %c in enabled mask)\n", 13327 pipe_name(drm_crtc_index(&crtc->base))); 13328 } 13329 } 13330 13331 static void 13332 intel_modeset_verify_crtc(struct intel_crtc *crtc, 13333 struct intel_atomic_state *state, 13334 struct intel_crtc_state *old_crtc_state, 13335 struct intel_crtc_state *new_crtc_state) 13336 { 13337 if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) 13338 return; 13339 13340 verify_wm_state(crtc, new_crtc_state); 13341 verify_connector_state(state, crtc); 13342 verify_crtc_state(crtc,
old_crtc_state, new_crtc_state); 13343 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state); 13344 } 13345 13346 static void 13347 verify_disabled_dpll_state(struct drm_i915_private *dev_priv) 13348 { 13349 int i; 13350 13351 for (i = 0; i < dev_priv->num_shared_dpll; i++) 13352 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); 13353 } 13354 13355 static void 13356 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, 13357 struct intel_atomic_state *state) 13358 { 13359 verify_encoder_state(dev_priv, state); 13360 verify_connector_state(state, NULL); 13361 verify_disabled_dpll_state(dev_priv); 13362 } 13363 13364 static void update_scanline_offset(const struct intel_crtc_state *crtc_state) 13365 { 13366 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 13367 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13368 13369 /* 13370 * The scanline counter increments at the leading edge of hsync. 13371 * 13372 * On most platforms it starts counting from vtotal-1 on the 13373 * first active line. That means the scanline counter value is 13374 * always one less than what we would expect. Ie. just after 13375 * start of vblank, which also occurs at start of hsync (on the 13376 * last active line), the scanline counter will read vblank_start-1. 13377 * 13378 * On gen2 the scanline counter starts counting from 1 instead 13379 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 13380 * to keep the value positive), instead of adding one. 13381 * 13382 * On HSW+ the behaviour of the scanline counter depends on the output 13383 * type. For DP ports it behaves like most other platforms, but on HDMI 13384 * there's an extra 1 line difference. So we need to add two instead of 13385 * one to the value. 13386 * 13387 * On VLV/CHV DSI the scanline counter would appear to increment 13388 * approx. 1/3 of a scanline before start of vblank. Unfortunately 13389 * that means we can't tell whether we're in vblank or not while 13390 * we're on that particular line. We must still set scanline_offset 13391 * to 1 so that the vblank timestamps come out correct when we query 13392 * the scanline counter from within the vblank interrupt handler. 13393 * However if queried just before the start of vblank we'll get an 13394 * answer that's slightly in the future. 13395 */ 13396 if (IS_GEN(dev_priv, 2)) { 13397 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; 13398 int vtotal; 13399 13400 vtotal = adjusted_mode->crtc_vtotal; 13401 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 13402 vtotal /= 2; 13403 13404 crtc->scanline_offset = vtotal - 1; 13405 } else if (HAS_DDI(dev_priv) && 13406 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 13407 crtc->scanline_offset = 2; 13408 } else 13409 crtc->scanline_offset = 1; 13410 } 13411 13412 static void intel_modeset_clear_plls(struct intel_atomic_state *state) 13413 { 13414 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13415 struct intel_crtc_state *new_crtc_state; 13416 struct intel_crtc *crtc; 13417 int i; 13418 13419 if (!dev_priv->display.crtc_compute_clock) 13420 return; 13421 13422 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 13423 if (!needs_modeset(new_crtc_state)) 13424 continue; 13425 13426 intel_release_shared_dplls(state, crtc); 13427 } 13428 } 13429 13430 /* 13431 * This implements the workaround described in the "notes" section of the mode 13432 * set sequence documentation. 
When going from no pipes or single pipe to 13433 * multiple pipes, and planes are enabled after the pipe, we need to wait at 13434 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 13435 */ 13436 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) 13437 { 13438 struct intel_crtc_state *crtc_state; 13439 struct intel_crtc *crtc; 13440 struct intel_crtc_state *first_crtc_state = NULL; 13441 struct intel_crtc_state *other_crtc_state = NULL; 13442 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 13443 int i; 13444 13445 /* look at all crtcs that are going to be enabled during the modeset */ 13446 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 13447 if (!crtc_state->base.active || 13448 !needs_modeset(crtc_state)) 13449 continue; 13450 13451 if (first_crtc_state) { 13452 other_crtc_state = crtc_state; 13453 break; 13454 } else { 13455 first_crtc_state = crtc_state; 13456 first_pipe = crtc->pipe; 13457 } 13458 } 13459 13460 /* No workaround needed? */ 13461 if (!first_crtc_state) 13462 return 0; 13463 13464 /* w/a possibly needed, check how many crtcs are already enabled. */ 13465 for_each_intel_crtc(state->base.dev, crtc) { 13466 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 13467 if (IS_ERR(crtc_state)) 13468 return PTR_ERR(crtc_state); 13469 13470 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 13471 13472 if (!crtc_state->base.active || 13473 needs_modeset(crtc_state)) 13474 continue; 13475 13476 /* 2 or more enabled crtcs means no need for w/a */ 13477 if (enabled_pipe != INVALID_PIPE) 13478 return 0; 13479 13480 enabled_pipe = crtc->pipe; 13481 } 13482 13483 if (enabled_pipe != INVALID_PIPE) 13484 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 13485 else if (other_crtc_state) 13486 other_crtc_state->hsw_workaround_pipe = first_pipe; 13487 13488 return 0; 13489 } 13490 13491 static int intel_modeset_checks(struct intel_atomic_state *state) 13492 { 13493 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13494 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13495 struct intel_crtc *crtc; 13496 int ret, i; 13497 13498 if (!check_digital_port_conflicts(state)) { 13499 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); 13500 return -EINVAL; 13501 } 13502 13503 /* keep the current setting */ 13504 if (!state->cdclk.force_min_cdclk_changed) 13505 state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk; 13506 13507 state->modeset = true; 13508 state->active_pipes = dev_priv->active_pipes; 13509 state->cdclk.logical = dev_priv->cdclk.logical; 13510 state->cdclk.actual = dev_priv->cdclk.actual; 13511 state->cdclk.pipe = INVALID_PIPE; 13512 13513 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13514 new_crtc_state, i) { 13515 if (new_crtc_state->base.active) 13516 state->active_pipes |= BIT(crtc->pipe); 13517 else 13518 state->active_pipes &= ~BIT(crtc->pipe); 13519 13520 if (old_crtc_state->base.active != new_crtc_state->base.active) 13521 state->active_pipe_changes |= BIT(crtc->pipe); 13522 } 13523 13524 ret = intel_modeset_calc_cdclk(state); 13525 if (ret) 13526 return ret; 13527 13528 intel_modeset_clear_plls(state); 13529 13530 if (IS_HASWELL(dev_priv)) 13531 return haswell_mode_set_planes_workaround(state); 13532 13533 return 0; 13534 } 13535 13536 /* 13537 * Handle calculation of various watermark data at the end of the atomic check 13538 phase.
The code here should be run after the per-crtc and per-plane 'check' 13539 * handlers to ensure that all derived state has been updated. 13540 */ 13541 static int calc_watermark_data(struct intel_atomic_state *state) 13542 { 13543 struct drm_device *dev = state->base.dev; 13544 struct drm_i915_private *dev_priv = to_i915(dev); 13545 13546 /* Is there platform-specific watermark information to calculate? */ 13547 if (dev_priv->display.compute_global_watermarks) 13548 return dev_priv->display.compute_global_watermarks(state); 13549 13550 return 0; 13551 } 13552 13553 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 13554 struct intel_crtc_state *new_crtc_state) 13555 { 13556 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 13557 return; 13558 13559 new_crtc_state->base.mode_changed = false; 13560 new_crtc_state->update_pipe = true; 13561 13562 /* 13563 * If we're not doing the full modeset we want to 13564 * keep the current M/N values as they may be 13565 * sufficiently different to the computed values 13566 * to cause problems. 13567 * 13568 * FIXME: should really copy more fuzzy state here 13569 */ 13570 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; 13571 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; 13572 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; 13573 new_crtc_state->has_drrs = old_crtc_state->has_drrs; 13574 } 13575 13576 /** 13577 * intel_atomic_check - validate state object 13578 * @dev: drm device 13579 * @_state: state to validate 13580 */ 13581 static int intel_atomic_check(struct drm_device *dev, 13582 struct drm_atomic_state *_state) 13583 { 13584 struct drm_i915_private *dev_priv = to_i915(dev); 13585 struct intel_atomic_state *state = to_intel_atomic_state(_state); 13586 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13587 struct intel_crtc *crtc; 13588 int ret, i; 13589 bool any_ms = state->cdclk.force_min_cdclk_changed; 13590 13591 /* Catch I915_MODE_FLAG_INHERITED */ 13592 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13593 new_crtc_state, i) { 13594 if (new_crtc_state->base.mode.private_flags != 13595 old_crtc_state->base.mode.private_flags) 13596 new_crtc_state->base.mode_changed = true; 13597 } 13598 13599 ret = drm_atomic_helper_check_modeset(dev, &state->base); 13600 if (ret) 13601 goto fail; 13602 13603 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13604 new_crtc_state, i) { 13605 if (!needs_modeset(new_crtc_state)) 13606 continue; 13607 13608 if (!new_crtc_state->base.enable) { 13609 any_ms = true; 13610 continue; 13611 } 13612 13613 ret = intel_modeset_pipe_config(new_crtc_state); 13614 if (ret) 13615 goto fail; 13616 13617 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 13618 13619 if (needs_modeset(new_crtc_state)) 13620 any_ms = true; 13621 } 13622 13623 ret = drm_dp_mst_atomic_check(&state->base); 13624 if (ret) 13625 goto fail; 13626 13627 if (any_ms) { 13628 ret = intel_modeset_checks(state); 13629 if (ret) 13630 goto fail; 13631 } else { 13632 state->cdclk.logical = dev_priv->cdclk.logical; 13633 } 13634 13635 ret = icl_add_linked_planes(state); 13636 if (ret) 13637 goto fail; 13638 13639 ret = drm_atomic_helper_check_planes(dev, &state->base); 13640 if (ret) 13641 goto fail; 13642 13643 intel_fbc_choose_crtc(dev_priv, state); 13644 ret = calc_watermark_data(state); 13645 if (ret) 13646 goto fail; 13647 13648 ret = intel_bw_atomic_check(state); 13649 if (ret) 13650 goto fail; 13651 13652 
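/* All checks passed; dump the final configuration of each crtc that is changing so the log records exactly what is about to be committed. */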
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13653 new_crtc_state, i) { 13654 if (!needs_modeset(new_crtc_state) && 13655 !new_crtc_state->update_pipe) 13656 continue; 13657 13658 intel_dump_pipe_config(new_crtc_state, state, 13659 needs_modeset(new_crtc_state) ? 13660 "[modeset]" : "[fastset]"); 13661 } 13662 13663 return 0; 13664 13665 fail: 13666 if (ret == -EDEADLK) 13667 return ret; 13668 13669 /* 13670 * FIXME would probably be nice to know which crtc specifically 13671 * caused the failure, in cases where we can pinpoint it. 13672 */ 13673 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13674 new_crtc_state, i) 13675 intel_dump_pipe_config(new_crtc_state, state, "[failed]"); 13676 13677 return ret; 13678 } 13679 13680 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 13681 { 13682 return drm_atomic_helper_prepare_planes(state->base.dev, 13683 &state->base); 13684 } 13685 13686 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 13687 { 13688 struct drm_device *dev = crtc->base.dev; 13689 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)]; 13690 13691 if (!vblank->max_vblank_count) 13692 return (u32)drm_crtc_accurate_vblank_count(&crtc->base); 13693 13694 return crtc->base.funcs->get_vblank_counter(&crtc->base); 13695 } 13696 13697 static void intel_update_crtc(struct intel_crtc *crtc, 13698 struct intel_atomic_state *state, 13699 struct intel_crtc_state *old_crtc_state, 13700 struct intel_crtc_state *new_crtc_state) 13701 { 13702 struct drm_device *dev = state->base.dev; 13703 struct drm_i915_private *dev_priv = to_i915(dev); 13704 bool modeset = needs_modeset(new_crtc_state); 13705 struct intel_plane_state *new_plane_state = 13706 intel_atomic_get_new_plane_state(state, 13707 to_intel_plane(crtc->base.primary)); 13708 13709 if (modeset) { 13710 update_scanline_offset(new_crtc_state); 13711 dev_priv->display.crtc_enable(new_crtc_state, state); 13712 13713 /* vblanks work again, re-enable pipe CRC. */ 13714 intel_crtc_enable_pipe_crc(crtc); 13715 } else { 13716 intel_pre_plane_update(old_crtc_state, new_crtc_state); 13717 13718 if (new_crtc_state->update_pipe) 13719 intel_encoders_update_pipe(crtc, new_crtc_state, state); 13720 } 13721 13722 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) 13723 intel_fbc_disable(crtc); 13724 else if (new_plane_state) 13725 intel_fbc_enable(crtc, new_crtc_state, new_plane_state); 13726 13727 intel_begin_crtc_commit(state, crtc); 13728 13729 if (INTEL_GEN(dev_priv) >= 9) 13730 skl_update_planes_on_crtc(state, crtc); 13731 else 13732 i9xx_update_planes_on_crtc(state, crtc); 13733 13734 intel_finish_crtc_commit(state, crtc); 13735 } 13736 13737 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 13738 struct intel_crtc_state *old_crtc_state, 13739 struct intel_crtc_state *new_crtc_state, 13740 struct intel_crtc *crtc) 13741 { 13742 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13743 13744 intel_crtc_disable_planes(state, crtc); 13745 13746 /* 13747 * We need to disable pipe CRC before disabling the pipe, 13748 * or we race against vblank off. 13749 */ 13750 intel_crtc_disable_pipe_crc(crtc); 13751 13752 dev_priv->display.crtc_disable(old_crtc_state, state); 13753 crtc->active = false; 13754 intel_fbc_disable(crtc); 13755 intel_disable_shared_dpll(old_crtc_state); 13756 13757 /* 13758 * Underruns don't always raise interrupts, 13759 * so check manually. 
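 * Reporting may also have been turned off after an earlier underrun to avoid an interrupt storm, in which case only a manual check will notice a new one.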
13760 */ 13761 intel_check_cpu_fifo_underruns(dev_priv); 13762 intel_check_pch_fifo_underruns(dev_priv); 13763 13764 /* FIXME unify this for all platforms */ 13765 if (!new_crtc_state->base.active && 13766 !HAS_GMCH(dev_priv) && 13767 dev_priv->display.initial_watermarks) 13768 dev_priv->display.initial_watermarks(state, 13769 new_crtc_state); 13770 } 13771 13772 static void intel_commit_modeset_disables(struct intel_atomic_state *state) 13773 { 13774 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 13775 struct intel_crtc *crtc; 13776 int i; 13777 13778 /* 13779 * Disable CRTC/pipes in reverse order because some features (MST in 13780 * TGL+) require a master and slave relationship between pipes, so we 13781 * always pick the lowest pipe as the master, as it will be enabled 13782 * first, and disable in the reverse order so that the master is the 13783 * last one to be disabled. 13784 */ 13785 for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state, 13786 new_crtc_state, i) { 13787 if (!needs_modeset(new_crtc_state)) 13788 continue; 13789 13790 intel_pre_plane_update(old_crtc_state, new_crtc_state); 13791 13792 if (old_crtc_state->base.active) 13793 intel_old_crtc_state_disables(state, 13794 old_crtc_state, 13795 new_crtc_state, 13796 crtc); 13797 } 13798 } 13799 13800 static void intel_commit_modeset_enables(struct intel_atomic_state *state) 13801 { 13802 struct intel_crtc *crtc; 13803 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13804 int i; 13805 13806 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 13807 if (!new_crtc_state->base.active) 13808 continue; 13809 13810 intel_update_crtc(crtc, state, old_crtc_state, 13811 new_crtc_state); 13812 } 13813 } 13814 13815 static void skl_commit_modeset_enables(struct intel_atomic_state *state) 13816 { 13817 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 13818 struct intel_crtc *crtc; 13819 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 13820 unsigned int updated = 0; 13821 bool progress; 13822 enum pipe pipe; 13823 int i; 13824 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 13825 u8 required_slices = state->wm_results.ddb.enabled_slices; 13826 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 13827 13828 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) 13829 /* ignore allocations for crtcs that have been turned off. */ 13830 if (new_crtc_state->base.active) 13831 entries[i] = old_crtc_state->wm.skl.ddb; 13832 13833 /* If a 2nd DBuf slice is required, enable it here */ 13834 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) 13835 icl_dbuf_slices_update(dev_priv, required_slices); 13836 13837 /* 13838 * Whenever the number of active pipes changes, we need to make sure we 13839 * update the pipes in the right order so that their ddb allocations 13840 * never overlap with each other in between CRTC updates. Otherwise we'll 13841 * cause pipe underruns and other bad stuff.
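 * For example (hypothetical allocations): when pipe A shrinks from ddb 0-400 to 0-200 and pipe B grows from 400-800 to 200-800, pipe A must be updated, and a vblank waited, before pipe B may claim the 200-400 range.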
13842 */ 13843 do { 13844 progress = false; 13845 13846 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 13847 bool vbl_wait = false; 13848 unsigned int cmask = drm_crtc_mask(&crtc->base); 13849 13850 pipe = crtc->pipe; 13851 13852 if (updated & cmask || !new_crtc_state->base.active) 13853 continue; 13854 13855 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 13856 entries, 13857 INTEL_NUM_PIPES(dev_priv), i)) 13858 continue; 13859 13860 updated |= cmask; 13861 entries[i] = new_crtc_state->wm.skl.ddb; 13862 13863 /* 13864 * If this is an already active pipe, its DDB allocation changed, 13865 * and this isn't the last pipe that needs updating, 13866 * then we need to wait for a vblank to pass for the 13867 * new ddb allocation to take effect. 13868 */ 13869 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 13870 &old_crtc_state->wm.skl.ddb) && 13871 !new_crtc_state->base.active_changed && 13872 state->wm_results.dirty_pipes != updated) 13873 vbl_wait = true; 13874 13875 intel_update_crtc(crtc, state, old_crtc_state, 13876 new_crtc_state); 13877 13878 if (vbl_wait) 13879 intel_wait_for_vblank(dev_priv, pipe); 13880 13881 progress = true; 13882 } 13883 } while (progress); 13884 13885 /* If the 2nd DBuf slice is no longer required, disable it */ 13886 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices) 13887 icl_dbuf_slices_update(dev_priv, required_slices); 13888 } 13889 13890 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 13891 { 13892 struct intel_atomic_state *state, *next; 13893 struct llist_node *freed; 13894 13895 freed = llist_del_all(&dev_priv->atomic_helper.free_list); 13896 llist_for_each_entry_safe(state, next, freed, freed) 13897 drm_atomic_state_put(&state->base); 13898 } 13899 13900 static void intel_atomic_helper_free_state_worker(struct work_struct *work) 13901 { 13902 struct drm_i915_private *dev_priv = 13903 container_of(work, typeof(*dev_priv), atomic_helper.free_work); 13904 13905 intel_atomic_helper_free_state(dev_priv); 13906 } 13907 13908 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 13909 { 13910 struct wait_queue_entry wait_fence, wait_reset; 13911 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); 13912 13913 init_wait_entry(&wait_fence, 0); 13914 init_wait_entry(&wait_reset, 0); 13915 for (;;) { 13916 prepare_to_wait(&intel_state->commit_ready.wait, 13917 &wait_fence, TASK_UNINTERRUPTIBLE); 13918 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 13919 I915_RESET_MODESET), 13920 &wait_reset, TASK_UNINTERRUPTIBLE); 13921 13922 13923 if (i915_sw_fence_done(&intel_state->commit_ready) || 13924 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 13925 break; 13926 13927 schedule(); 13928 } 13929 finish_wait(&intel_state->commit_ready.wait, &wait_fence); 13930 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 13931 I915_RESET_MODESET), 13932 &wait_reset); 13933 } 13934 13935 static void intel_atomic_cleanup_work(struct work_struct *work) 13936 { 13937 struct drm_atomic_state *state = 13938 container_of(work, struct drm_atomic_state, commit_work); 13939 struct drm_i915_private *i915 = to_i915(state->dev); 13940 13941 drm_atomic_helper_cleanup_planes(&i915->drm, state); 13942 drm_atomic_helper_commit_cleanup_done(state); 13943 drm_atomic_state_put(state); 13944 13945 intel_atomic_helper_free_state(i915); 13946 } 13947 13948 static void intel_atomic_commit_tail(struct intel_atomic_state *state) 13949 { 13950 struct
drm_device *dev = state->base.dev; 13951 struct drm_i915_private *dev_priv = to_i915(dev); 13952 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 13953 struct intel_crtc *crtc; 13954 u64 put_domains[I915_MAX_PIPES] = {}; 13955 intel_wakeref_t wakeref = 0; 13956 int i; 13957 13958 intel_atomic_commit_fence_wait(state); 13959 13960 drm_atomic_helper_wait_for_dependencies(&state->base); 13961 13962 if (state->modeset) 13963 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 13964 13965 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 13966 new_crtc_state, i) { 13967 if (needs_modeset(new_crtc_state) || 13968 new_crtc_state->update_pipe) { 13969 13970 put_domains[crtc->pipe] = 13971 modeset_get_crtc_power_domains(new_crtc_state); 13972 } 13973 } 13974 13975 intel_commit_modeset_disables(state); 13976 13977 /* FIXME: Eventually get rid of our crtc->config pointer */ 13978 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 13979 crtc->config = new_crtc_state; 13980 13981 if (state->modeset) { 13982 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); 13983 13984 intel_set_cdclk_pre_plane_update(dev_priv, 13985 &state->cdclk.actual, 13986 &dev_priv->cdclk.actual, 13987 state->cdclk.pipe); 13988 13989 /* 13990 * SKL workaround: bspec recommends we disable the SAGV when we 13991 * have more than one pipe enabled 13992 */ 13993 if (!intel_can_enable_sagv(state)) 13994 intel_disable_sagv(dev_priv); 13995 13996 intel_modeset_verify_disabled(dev_priv, state); 13997 } 13998 13999 /* Complete the events for pipes that have now been disabled */ 14000 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14001 bool modeset = needs_modeset(new_crtc_state); 14002 14003 /* Complete events for now-disabled pipes here. */ 14004 if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) { 14005 spin_lock_irq(&dev->event_lock); 14006 drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event); 14007 spin_unlock_irq(&dev->event_lock); 14008 14009 new_crtc_state->base.event = NULL; 14010 } 14011 } 14012 14013 if (state->modeset) 14014 intel_encoders_update_prepare(state); 14015 14016 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 14017 dev_priv->display.commit_modeset_enables(state); 14018 14019 if (state->modeset) { 14020 intel_encoders_update_complete(state); 14021 14022 intel_set_cdclk_post_plane_update(dev_priv, 14023 &state->cdclk.actual, 14024 &dev_priv->cdclk.actual, 14025 state->cdclk.pipe); 14026 } 14027 14028 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 14029 * already, but still need the state for the delayed optimization. To 14030 * fix this: 14031 * - wrap the optimization/post_plane_update stuff into a per-crtc work. 14032 * - schedule that vblank worker _before_ calling hw_done 14033 * - at the start of commit_tail, cancel it synchronously 14034 * - switch over to the vblank wait helper in the core after that since 14035 * we don't need our special handling any more.
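 * Until then, the wait_for_flip_done() call below also serves as the post-commit vblank wait that the watermark optimization further down depends on.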
14036 */ 14037 drm_atomic_helper_wait_for_flip_done(dev, &state->base); 14038 14039 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14040 if (new_crtc_state->base.active && 14041 !needs_modeset(new_crtc_state) && 14042 (new_crtc_state->base.color_mgmt_changed || 14043 new_crtc_state->update_pipe)) 14044 intel_color_load_luts(new_crtc_state); 14045 } 14046 14047 /* 14048 * Now that the vblank has passed, we can go ahead and program the 14049 * optimal watermarks on platforms that need two-step watermark 14050 * programming. 14051 * 14052 * TODO: Move this (and other cleanup) to an async worker eventually. 14053 */ 14054 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14055 if (dev_priv->display.optimize_watermarks) 14056 dev_priv->display.optimize_watermarks(state, 14057 new_crtc_state); 14058 } 14059 14060 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 14061 intel_post_plane_update(old_crtc_state); 14062 14063 if (put_domains[i]) 14064 modeset_put_power_domains(dev_priv, put_domains[i]); 14065 14066 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); 14067 } 14068 14069 if (state->modeset) 14070 intel_verify_planes(state); 14071 14072 if (state->modeset && intel_can_enable_sagv(state)) 14073 intel_enable_sagv(dev_priv); 14074 14075 drm_atomic_helper_commit_hw_done(&state->base); 14076 14077 if (state->modeset) { 14078 /* As one of the primary mmio accessors, KMS has a high 14079 * likelihood of triggering bugs in unclaimed access. After we 14080 * finish modesetting, see if an error has been flagged, and if 14081 * so enable debugging for the next modeset - and hope we catch 14082 * the culprit. 14083 */ 14084 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 14085 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); 14086 } 14087 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 14088 14089 /* 14090 * Defer the cleanup of the old state to a separate worker so that we 14091 * don't impede the current task (userspace for blocking modesets), 14092 * which would otherwise execute the cleanup inline. For out-of-line asynchronous modesets/flips, 14093 * deferring to a new worker seems overkill, but we would place a 14094 * schedule point (cond_resched()) here anyway to keep latencies 14095 * down.
14096 */ 14097 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 14098 queue_work(system_highpri_wq, &state->base.commit_work); 14099 } 14100 14101 static void intel_atomic_commit_work(struct work_struct *work) 14102 { 14103 struct intel_atomic_state *state = 14104 container_of(work, struct intel_atomic_state, base.commit_work); 14105 14106 intel_atomic_commit_tail(state); 14107 } 14108 14109 static int __i915_sw_fence_call 14110 intel_atomic_commit_ready(struct i915_sw_fence *fence, 14111 enum i915_sw_fence_notify notify) 14112 { 14113 struct intel_atomic_state *state = 14114 container_of(fence, struct intel_atomic_state, commit_ready); 14115 14116 switch (notify) { 14117 case FENCE_COMPLETE: 14118 /* we do blocking waits in the worker, nothing to do here */ 14119 break; 14120 case FENCE_FREE: 14121 { 14122 struct intel_atomic_helper *helper = 14123 &to_i915(state->base.dev)->atomic_helper; 14124 14125 if (llist_add(&state->freed, &helper->free_list)) 14126 schedule_work(&helper->free_work); 14127 break; 14128 } 14129 } 14130 14131 return NOTIFY_DONE; 14132 } 14133 14134 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 14135 { 14136 struct intel_plane_state *old_plane_state, *new_plane_state; 14137 struct intel_plane *plane; 14138 int i; 14139 14140 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 14141 new_plane_state, i) 14142 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb), 14143 to_intel_frontbuffer(new_plane_state->base.fb), 14144 plane->frontbuffer_bit); 14145 } 14146 14147 static int intel_atomic_commit(struct drm_device *dev, 14148 struct drm_atomic_state *_state, 14149 bool nonblock) 14150 { 14151 struct intel_atomic_state *state = to_intel_atomic_state(_state); 14152 struct drm_i915_private *dev_priv = to_i915(dev); 14153 int ret = 0; 14154 14155 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 14156 14157 drm_atomic_state_get(&state->base); 14158 i915_sw_fence_init(&state->commit_ready, 14159 intel_atomic_commit_ready); 14160 14161 /* 14162 * The intel_legacy_cursor_update() fast path takes care 14163 * of avoiding the vblank waits for simple cursor 14164 * movement and flips. For cursor on/off and size changes, 14165 * we want to perform the vblank waits so that watermark 14166 * updates happen during the correct frames. Gen9+ have 14167 * double buffered watermarks and so shouldn't need this. 14168 * 14169 * Unset state->legacy_cursor_update before the call to 14170 * drm_atomic_helper_setup_commit() because otherwise 14171 * drm_atomic_helper_wait_for_flip_done() is a noop and 14172 * we get FIFO underruns because we didn't wait 14173 * for vblank. 14174 * 14175 * FIXME doing watermarks and fb cleanup from a vblank worker 14176 * (assuming we had any) would solve these problems. 
14177 */ 14178 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) { 14179 struct intel_crtc_state *new_crtc_state; 14180 struct intel_crtc *crtc; 14181 int i; 14182 14183 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 14184 if (new_crtc_state->wm.need_postvbl_update || 14185 new_crtc_state->update_wm_post) 14186 state->base.legacy_cursor_update = false; 14187 } 14188 14189 ret = intel_atomic_prepare_commit(state); 14190 if (ret) { 14191 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); 14192 i915_sw_fence_commit(&state->commit_ready); 14193 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 14194 return ret; 14195 } 14196 14197 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 14198 if (!ret) 14199 ret = drm_atomic_helper_swap_state(&state->base, true); 14200 14201 if (ret) { 14202 i915_sw_fence_commit(&state->commit_ready); 14203 14204 drm_atomic_helper_cleanup_planes(dev, &state->base); 14205 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 14206 return ret; 14207 } 14208 dev_priv->wm.distrust_bios_wm = false; 14209 intel_shared_dpll_swap_state(state); 14210 intel_atomic_track_fbs(state); 14211 14212 if (state->modeset) { 14213 memcpy(dev_priv->min_cdclk, state->min_cdclk, 14214 sizeof(state->min_cdclk)); 14215 memcpy(dev_priv->min_voltage_level, state->min_voltage_level, 14216 sizeof(state->min_voltage_level)); 14217 dev_priv->active_pipes = state->active_pipes; 14218 dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; 14219 14220 intel_cdclk_swap_state(state); 14221 } 14222 14223 drm_atomic_state_get(&state->base); 14224 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 14225 14226 i915_sw_fence_commit(&state->commit_ready); 14227 if (nonblock && state->modeset) { 14228 queue_work(dev_priv->modeset_wq, &state->base.commit_work); 14229 } else if (nonblock) { 14230 queue_work(dev_priv->flip_wq, &state->base.commit_work); 14231 } else { 14232 if (state->modeset) 14233 flush_workqueue(dev_priv->modeset_wq); 14234 intel_atomic_commit_tail(state); 14235 } 14236 14237 return 0; 14238 } 14239 14240 struct wait_rps_boost { 14241 struct wait_queue_entry wait; 14242 14243 struct drm_crtc *crtc; 14244 struct i915_request *request; 14245 }; 14246 14247 static int do_rps_boost(struct wait_queue_entry *_wait, 14248 unsigned mode, int sync, void *key) 14249 { 14250 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 14251 struct i915_request *rq = wait->request; 14252 14253 /* 14254 * If we missed the vblank, but the request is already running it 14255 * is reasonable to assume that it will complete before the next 14256 * vblank without our intervention, so leave RPS alone. 
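 * If it has not started yet, kick RPS so the outstanding rendering has a better chance of completing before the next vblank.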
14257 */ 14258 if (!i915_request_started(rq)) 14259 gen6_rps_boost(rq); 14260 i915_request_put(rq); 14261 14262 drm_crtc_vblank_put(wait->crtc); 14263 14264 list_del(&wait->wait.entry); 14265 kfree(wait); 14266 return 1; 14267 } 14268 14269 static void add_rps_boost_after_vblank(struct drm_crtc *crtc, 14270 struct dma_fence *fence) 14271 { 14272 struct wait_rps_boost *wait; 14273 14274 if (!dma_fence_is_i915(fence)) 14275 return; 14276 14277 if (INTEL_GEN(to_i915(crtc->dev)) < 6) 14278 return; 14279 14280 if (drm_crtc_vblank_get(crtc)) 14281 return; 14282 14283 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 14284 if (!wait) { 14285 drm_crtc_vblank_put(crtc); 14286 return; 14287 } 14288 14289 wait->request = to_request(dma_fence_get(fence)); 14290 wait->crtc = crtc; 14291 14292 wait->wait.func = do_rps_boost; 14293 wait->wait.flags = 0; 14294 14295 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 14296 } 14297 14298 static int intel_plane_pin_fb(struct intel_plane_state *plane_state) 14299 { 14300 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 14301 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 14302 struct drm_framebuffer *fb = plane_state->base.fb; 14303 struct i915_vma *vma; 14304 14305 if (plane->id == PLANE_CURSOR && 14306 INTEL_INFO(dev_priv)->display.cursor_needs_physical) { 14307 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 14308 const int align = intel_cursor_alignment(dev_priv); 14309 int err; 14310 14311 err = i915_gem_object_attach_phys(obj, align); 14312 if (err) 14313 return err; 14314 } 14315 14316 vma = intel_pin_and_fence_fb_obj(fb, 14317 &plane_state->view, 14318 intel_plane_uses_fence(plane_state), 14319 &plane_state->flags); 14320 if (IS_ERR(vma)) 14321 return PTR_ERR(vma); 14322 14323 plane_state->vma = vma; 14324 14325 return 0; 14326 } 14327 14328 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 14329 { 14330 struct i915_vma *vma; 14331 14332 vma = fetch_and_zero(&old_plane_state->vma); 14333 if (vma) 14334 intel_unpin_fb_vma(vma, old_plane_state->flags); 14335 } 14336 14337 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) 14338 { 14339 struct i915_sched_attr attr = { 14340 .priority = I915_PRIORITY_DISPLAY, 14341 }; 14342 14343 i915_gem_object_wait_priority(obj, 0, &attr); 14344 } 14345 14346 /** 14347 * intel_prepare_plane_fb - Prepare fb for usage on plane 14348 * @plane: drm plane to prepare for 14349 * @new_state: the plane state being prepared 14350 * 14351 * Prepares a framebuffer for usage on a display plane. Generally this 14352 * involves pinning the underlying object and updating the frontbuffer tracking 14353 * bits. Some older platforms need special physical address handling for 14354 * cursor planes. 14355 * 14356 * Returns 0 on success, negative error code on failure. 
14357 */ 14358 int 14359 intel_prepare_plane_fb(struct drm_plane *plane, 14360 struct drm_plane_state *new_state) 14361 { 14362 struct intel_atomic_state *intel_state = 14363 to_intel_atomic_state(new_state->state); 14364 struct drm_i915_private *dev_priv = to_i915(plane->dev); 14365 struct drm_framebuffer *fb = new_state->fb; 14366 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 14367 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); 14368 int ret; 14369 14370 if (old_obj) { 14371 struct intel_crtc_state *crtc_state = 14372 intel_atomic_get_new_crtc_state(intel_state, 14373 to_intel_crtc(plane->state->crtc)); 14374 14375 /* Big Hammer, we also need to ensure that any pending 14376 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 14377 * current scanout is retired before unpinning the old 14378 * framebuffer. Note that we rely on userspace rendering 14379 * into the buffer attached to the pipe they are waiting 14380 * on. If not, userspace generates a GPU hang with IPEHR 14381 * pointing to the MI_WAIT_FOR_EVENT. 14382 * 14383 * This should only fail upon a hung GPU, in which case we 14384 * can safely continue. 14385 */ 14386 if (needs_modeset(crtc_state)) { 14387 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 14388 old_obj->base.resv, NULL, 14389 false, 0, 14390 GFP_KERNEL); 14391 if (ret < 0) 14392 return ret; 14393 } 14394 } 14395 14396 if (new_state->fence) { /* explicit fencing */ 14397 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, 14398 new_state->fence, 14399 I915_FENCE_TIMEOUT, 14400 GFP_KERNEL); 14401 if (ret < 0) 14402 return ret; 14403 } 14404 14405 if (!obj) 14406 return 0; 14407 14408 ret = i915_gem_object_pin_pages(obj); 14409 if (ret) 14410 return ret; 14411 14412 ret = intel_plane_pin_fb(to_intel_plane_state(new_state)); 14413 14414 i915_gem_object_unpin_pages(obj); 14415 if (ret) 14416 return ret; 14417 14418 fb_obj_bump_render_priority(obj); 14419 intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB); 14420 14421 if (!new_state->fence) { /* implicit fencing */ 14422 struct dma_fence *fence; 14423 14424 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 14425 obj->base.resv, NULL, 14426 false, I915_FENCE_TIMEOUT, 14427 GFP_KERNEL); 14428 if (ret < 0) 14429 return ret; 14430 14431 fence = dma_resv_get_excl_rcu(obj->base.resv); 14432 if (fence) { 14433 add_rps_boost_after_vblank(new_state->crtc, fence); 14434 dma_fence_put(fence); 14435 } 14436 } else { 14437 add_rps_boost_after_vblank(new_state->crtc, new_state->fence); 14438 } 14439 14440 /* 14441 * We declare pageflips to be interactive and so merit a small bias 14442 * towards upclocking to deliver the frame on time. By only changing 14443 * the RPS thresholds to sample more regularly and aim for higher 14444 * clocks we can hopefully deliver low power workloads (like kodi) 14445 * that are not quite steady state without resorting to forcing 14446 * maximum clocks following a vblank miss (see do_rps_boost()). 14447 */ 14448 if (!intel_state->rps_interactive) { 14449 intel_rps_mark_interactive(dev_priv, true); 14450 intel_state->rps_interactive = true; 14451 } 14452 14453 return 0; 14454 } 14455 14456 /** 14457 * intel_cleanup_plane_fb - Cleans up an fb after plane use 14458 * @plane: drm plane to clean up for 14459 * @old_state: the state from the previous modeset 14460 * 14461 * Cleans up a framebuffer that has just been removed from a plane.
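 * Must balance a successful call to intel_prepare_plane_fb().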

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(old_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);

	if (intel_state->rps_interactive) {
		intel_rps_mark_interactive(dev_priv, false);
		intel_state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	intel_plane_unpin_fb(to_intel_plane_state(old_state));
}

int
skl_max_scale(const struct intel_crtc_state *crtc_state,
	      const struct drm_format_info *format)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int max_scale;
	int crtc_clock, max_dotclk, tmpclk1, tmpclk2;

	if (!crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;

	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
		max_dotclk *= 2;

	if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * The skl max scale, in 16.16 fixed point, is the lower of:
	 * just below 3.0 (or just below 2.0 for YUV semi-planar formats
	 * on gen9 parts other than GLK; the -1 keeps the value strictly
	 * below the limit)
	 * or
	 * cdclk/crtc_clock
	 */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
	    !drm_format_info_is_yuv_semiplanar(format))
		tmpclk1 = 0x30000 - 1;
	else
		tmpclk1 = 0x20000 - 1;
	tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
	max_scale = min(tmpclk1, tmpclk2);

	return max_scale;
}
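
/*
 * Worked example of the 16.16 fixed point math above (numbers chosen
 * for illustration only): with max_dotclk = 648000 and crtc_clock =
 * 148500, tmpclk2 = ((648000 << 8) / 148500) << 8 = 1117 << 8 =
 * 0x45d00 (~4.36x), so the result is capped by tmpclk1 = 0x2ffff,
 * i.e. just under a 3x downscale ratio.
 */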

static void intel_begin_crtc_commit(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = needs_modeset(new_crtc_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	if (modeset)
		goto out;

	if (new_crtc_state->base.color_mgmt_changed ||
	    new_crtc_state->update_pipe)
		intel_color_commit(new_crtc_state);

	if (new_crtc_state->update_pipe)
		intel_update_pipe_config(old_crtc_state, new_crtc_state);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state,
							   new_crtc_state);
}

void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
	}
}

static void intel_finish_crtc_commit(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	if (new_crtc_state->update_pipe &&
	    !needs_modeset(new_crtc_state) &&
	    old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
					    u32 format, u64 modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		return false;
	}

	switch (format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_XRGB8888:
		return modifier == DRM_FORMAT_MOD_LINEAR ||
			modifier == I915_FORMAT_MOD_X_TILED;
	default:
		return false;
	}
}

static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
					    u32 format, u64 modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		return false;
	}

	switch (format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		return modifier == DRM_FORMAT_MOD_LINEAR ||
			modifier == I915_FORMAT_MOD_X_TILED;
	default:
		return false;
	}
}

static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
					      u32 format, u64 modifier)
{
	return modifier == DRM_FORMAT_MOD_LINEAR &&
		format == DRM_FORMAT_ARGB8888;
}

static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};

static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};

static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->base.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	old_plane_state = plane->state;
	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane. This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  to_intel_plane_state(old_plane_state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb),
				to_intel_frontbuffer(fb),
				intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (plane->state->visible)
		intel_update_plane(intel_plane, crtc_state,
				   to_intel_plane_state(plane->state));
	else
		intel_disable_plane(intel_plane, crtc_state);

	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_free:
	if (new_crtc_state)
		intel_crtc_destroy_state(crtc, &new_crtc_state->base);
	if (ret)
		intel_plane_destroy_state(plane, new_plane_state);
	else
		intel_plane_destroy_state(plane, old_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
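
/*
 * Rough decision table for the fastpath above (illustrative summary,
 * not exhaustive):
 *
 *	crtc inactive / modeset / update_pipe pending	-> slowpath
 *	older commit on the plane not yet hw_done	-> slowpath
 *	size, crtc, or fb-presence changes		-> slowpath
 *	only fb contents or position change		-> fastpath
 *
 * The legacy DRM_IOCTL_MODE_CURSOR/CURSOR2 paths are routed through
 * .update_plane by the DRM core for universal planes, which is how
 * cursor moves end up here.
 */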

static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};

static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
			       enum i9xx_plane_id i9xx_plane)
{
	if (!HAS_FBC(dev_priv))
		return false;

	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return i9xx_plane == PLANE_A; /* tied to pipe A */
	else if (IS_IVYBRIDGE(dev_priv))
		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
			i9xx_plane == PLANE_C;
	else if (INTEL_GEN(dev_priv) >= 4)
		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
	else
		return i9xx_plane == PLANE_A;
}

static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	unsigned int possible_crtcs;
	const u64 *modifiers;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
		modifiers = i9xx_format_modifiers;

		plane->max_stride = i9xx_plane_max_stride;
		plane->update_plane = i9xx_update_plane;
		plane->disable_plane = i9xx_disable_plane;
		plane->get_hw_state = i9xx_plane_get_hw_state;
		plane->check_plane = i9xx_plane_check;

		plane_funcs = &i965_plane_funcs;
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
		modifiers = i9xx_format_modifiers;

		plane->max_stride = i9xx_plane_max_stride;
		plane->update_plane = i9xx_update_plane;
		plane->disable_plane = i9xx_disable_plane;
		plane->get_hw_state = i9xx_plane_get_hw_state;
		plane->check_plane = i9xx_plane_check;

		plane_funcs = &i8xx_plane_funcs;
	}

	possible_crtcs = BIT(pipe);

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats, modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats, modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}

static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}

static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int i;

	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
	if (!crtc->num_scalers)
		return;

	for (i = 0; i < crtc->num_scalers; i++) {
		struct intel_scaler *scaler = &scaler_state->scalers[i];

		scaler->in_use = 0;
		scaler->mode = 0;
	}

	scaler_state->scaler_id = -1;
}

#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources

static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};

static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};

static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
};

static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};

static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};

static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	__drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
	intel_crtc->config = crtc_state;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(intel_crtc);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}

int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		if (encoder->crtc_mask & BIT(crtc->pipe))
			possible_crtcs |= drm_crtc_mask(&crtc->base);
	}

	return possible_crtcs;
}
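
/*
 * Worked example for the conversion above (illustrative): an encoder
 * with crtc_mask = BIT(PIPE_B) | BIT(PIPE_C) yields possible_crtcs =
 * 0x6, since this driver registers one CRTC per pipe and WARNs in
 * intel_crtc_init() if drm_crtc_index() ever diverges from the pipe
 * enum.
 */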

static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
{
	int pps_num;
	int pps_idx;

	if (HAS_DDI(dev_priv))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere that registers can be write protected.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_num = 2;
	else
		pps_num = 1;

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		u32 val = I915_READ(PP_CONTROL(pps_idx));

		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
		I915_WRITE(PP_CONTROL(pps_idx), val);
	}
}

static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}

static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	if (INTEL_GEN(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/*
		 * DDI B, C, D, and F detection is indicated by the
		 * SFUSE_STRAP register.
		 */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claims are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}

static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	intel_frontbuffer_put(intel_fb->frontbuffer);

	kfree(intel_fb);
}

static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};

static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	i915_gem_object_lock(obj);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
			      drm_get_format_name(mode_cmd->pixel_format,
						  &format_name),
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		goto err;

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			DRM_DEBUG_KMS("bad plane %d handle\n", i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);

		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
		    is_ccs_modifier(fb->modifier))
			stride_alignment *= 4;
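
		/*
		 * Worked example for the workaround above (illustrative
		 * numbers; the base alignment depends on the modifier):
		 * if intel_fb_stride_alignment() returned 64 bytes for a
		 * CCS fb that is 4096 pixels wide on gen9, the effective
		 * requirement becomes 64 * 4 = 256 bytes.
		 */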

		if (fb->pitches[i] & (stride_alignment - 1)) {
			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
				      i, fb->pitches[i], stride_alignment);
			goto err;
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}

static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	i915_gem_object_put(obj);

	return fb;
}

static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	drm_atomic_state_default_release(state);

	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}

static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (INTEL_GEN(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
	 */
	if (INTEL_GEN(dev_priv) >= 11) {
		plane_width_max = 5120;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}
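
/*
 * Concrete example of how the two validators above interact (numbers
 * picked for illustration): on a gen9 part, a 7680x4320 mode whose
 * timings fit the 8192 htotal limit passes intel_mode_valid()
 * (hdisplay 7680 <= 8192) but is then rejected by
 * intel_mode_valid_max_plane_size() since 7680 > 5120.
 */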

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
	else
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
}

void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	intel_update_cdclk(i915);
	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
	i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc_state);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 5)) {
		u32 fdi_pll_clk =
			I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
		dev_priv->fdi_pll_freq = 270000;
	} else {
		return;
	}

	DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
}

static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->active) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->color_mgmt_changed = true;
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static void intel_mode_config_init(struct drm_i915_private *i915)
{
	struct drm_mode_config *mode_config = &i915->drm.mode_config;

	drm_mode_config_init(&i915->drm);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->allow_fb_modifiers = true;

	mode_config->funcs = &intel_mode_funcs;

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (INTEL_GEN(i915) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (INTEL_GEN(i915) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (IS_GEN(i915, 3)) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	if (IS_I845G(i915) || IS_I865G(i915)) {
		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (IS_GEN(i915, 2)) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}

int intel_modeset_init(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	ret = intel_bw_init(i915);
	if (ret)
		return ret;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_NUM_PIPES(i915),
		      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				drm_mode_config_cleanup(dev);
				return ret;
			}
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(dev);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		DRM_DEBUG_KMS("Initial commit in probe failed.\n");

	return 0;
}

void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
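
	/*
	 * Sanity check of the divider math (illustrative, assuming the
	 * usual i9xx PLL formulas): m = 5 * (m1 + 2) + (m2 + 2) = 109,
	 * vco = 48000 * 109 / (n + 2) = 1308000 kHz, and dot =
	 * 1308000 / (p1 * p2) = 1308000 / 52 ~= 25154 kHz, matching the
	 * WARN_ON() above and close enough to the nominal 25175 kHz.
	 */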
16015 */ 16016 drm_modeset_acquire_init(&ctx, 0); 16017 retry: 16018 ret = drm_modeset_lock_all_ctx(dev, &ctx); 16019 if (ret == -EDEADLK) { 16020 drm_modeset_backoff(&ctx); 16021 goto retry; 16022 } else if (WARN_ON(ret)) { 16023 goto fail; 16024 } 16025 16026 state = drm_atomic_helper_duplicate_state(dev, &ctx); 16027 if (WARN_ON(IS_ERR(state))) 16028 goto fail; 16029 16030 intel_state = to_intel_atomic_state(state); 16031 16032 /* 16033 * Hardware readout is the only time we don't want to calculate 16034 * intermediate watermarks (since we don't trust the current 16035 * watermarks). 16036 */ 16037 if (!HAS_GMCH(dev_priv)) 16038 intel_state->skip_intermediate_wm = true; 16039 16040 ret = intel_atomic_check(dev, state); 16041 if (ret) { 16042 /* 16043 * If we fail here, it means that the hardware appears to be 16044 * programmed in a way that shouldn't be possible, given our 16045 * understanding of watermark requirements. This might mean a 16046 * mistake in the hardware readout code or a mistake in the 16047 * watermark calculations for a given platform. Raise a WARN 16048 * so that this is noticeable. 16049 * 16050 * If this actually happens, we'll have to just leave the 16051 * BIOS-programmed watermarks untouched and hope for the best. 16052 */ 16053 WARN(true, "Could not determine valid watermarks for inherited state\n"); 16054 goto put_state; 16055 } 16056 16057 /* Write calculated watermark values back */ 16058 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { 16059 crtc_state->wm.need_postvbl_update = true; 16060 dev_priv->display.optimize_watermarks(intel_state, crtc_state); 16061 16062 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; 16063 } 16064 16065 put_state: 16066 drm_atomic_state_put(state); 16067 fail: 16068 drm_modeset_drop_locks(&ctx); 16069 drm_modeset_acquire_fini(&ctx); 16070 } 16071 16072 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) 16073 { 16074 if (IS_GEN(dev_priv, 5)) { 16075 u32 fdi_pll_clk = 16076 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; 16077 16078 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; 16079 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) { 16080 dev_priv->fdi_pll_freq = 270000; 16081 } else { 16082 return; 16083 } 16084 16085 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); 16086 } 16087 16088 static int intel_initial_commit(struct drm_device *dev) 16089 { 16090 struct drm_atomic_state *state = NULL; 16091 struct drm_modeset_acquire_ctx ctx; 16092 struct drm_crtc *crtc; 16093 struct drm_crtc_state *crtc_state; 16094 int ret = 0; 16095 16096 state = drm_atomic_state_alloc(dev); 16097 if (!state) 16098 return -ENOMEM; 16099 16100 drm_modeset_acquire_init(&ctx, 0); 16101 16102 retry: 16103 state->acquire_ctx = &ctx; 16104 16105 drm_for_each_crtc(crtc, dev) { 16106 crtc_state = drm_atomic_get_crtc_state(state, crtc); 16107 if (IS_ERR(crtc_state)) { 16108 ret = PTR_ERR(crtc_state); 16109 goto out; 16110 } 16111 16112 if (crtc_state->active) { 16113 ret = drm_atomic_add_affected_planes(state, crtc); 16114 if (ret) 16115 goto out; 16116 16117 /* 16118 * FIXME hack to force a LUT update to avoid the 16119 * plane update forcing the pipe gamma on without 16120 * having a proper LUT loaded. Remove once we 16121 * have readout for pipe gamma enable. 
16122 */ 16123 crtc_state->color_mgmt_changed = true; 16124 } 16125 } 16126 16127 ret = drm_atomic_commit(state); 16128 16129 out: 16130 if (ret == -EDEADLK) { 16131 drm_atomic_state_clear(state); 16132 drm_modeset_backoff(&ctx); 16133 goto retry; 16134 } 16135 16136 drm_atomic_state_put(state); 16137 16138 drm_modeset_drop_locks(&ctx); 16139 drm_modeset_acquire_fini(&ctx); 16140 16141 return ret; 16142 } 16143 16144 static void intel_mode_config_init(struct drm_i915_private *i915) 16145 { 16146 struct drm_mode_config *mode_config = &i915->drm.mode_config; 16147 16148 drm_mode_config_init(&i915->drm); 16149 16150 mode_config->min_width = 0; 16151 mode_config->min_height = 0; 16152 16153 mode_config->preferred_depth = 24; 16154 mode_config->prefer_shadow = 1; 16155 16156 mode_config->allow_fb_modifiers = true; 16157 16158 mode_config->funcs = &intel_mode_funcs; 16159 16160 /* 16161 * Maximum framebuffer dimensions, chosen to match 16162 * the maximum render engine surface size on gen4+. 16163 */ 16164 if (INTEL_GEN(i915) >= 7) { 16165 mode_config->max_width = 16384; 16166 mode_config->max_height = 16384; 16167 } else if (INTEL_GEN(i915) >= 4) { 16168 mode_config->max_width = 8192; 16169 mode_config->max_height = 8192; 16170 } else if (IS_GEN(i915, 3)) { 16171 mode_config->max_width = 4096; 16172 mode_config->max_height = 4096; 16173 } else { 16174 mode_config->max_width = 2048; 16175 mode_config->max_height = 2048; 16176 } 16177 16178 if (IS_I845G(i915) || IS_I865G(i915)) { 16179 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512; 16180 mode_config->cursor_height = 1023; 16181 } else if (IS_GEN(i915, 2)) { 16182 mode_config->cursor_width = 64; 16183 mode_config->cursor_height = 64; 16184 } else { 16185 mode_config->cursor_width = 256; 16186 mode_config->cursor_height = 256; 16187 } 16188 } 16189 16190 int intel_modeset_init(struct drm_i915_private *i915) 16191 { 16192 struct drm_device *dev = &i915->drm; 16193 enum pipe pipe; 16194 struct intel_crtc *crtc; 16195 int ret; 16196 16197 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); 16198 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | 16199 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); 16200 16201 intel_mode_config_init(i915); 16202 16203 ret = intel_bw_init(i915); 16204 if (ret) 16205 return ret; 16206 16207 init_llist_head(&i915->atomic_helper.free_list); 16208 INIT_WORK(&i915->atomic_helper.free_work, 16209 intel_atomic_helper_free_state_worker); 16210 16211 intel_init_quirks(i915); 16212 16213 intel_fbc_init(i915); 16214 16215 intel_init_pm(i915); 16216 16217 intel_panel_sanitize_ssc(i915); 16218 16219 intel_gmbus_setup(i915); 16220 16221 DRM_DEBUG_KMS("%d display pipe%s available.\n", 16222 INTEL_NUM_PIPES(i915), 16223 INTEL_NUM_PIPES(i915) > 1 ? 
"s" : ""); 16224 16225 if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { 16226 for_each_pipe(i915, pipe) { 16227 ret = intel_crtc_init(i915, pipe); 16228 if (ret) { 16229 drm_mode_config_cleanup(dev); 16230 return ret; 16231 } 16232 } 16233 } 16234 16235 intel_shared_dpll_init(dev); 16236 intel_update_fdi_pll_freq(i915); 16237 16238 intel_update_czclk(i915); 16239 intel_modeset_init_hw(i915); 16240 16241 intel_hdcp_component_init(i915); 16242 16243 if (i915->max_cdclk_freq == 0) 16244 intel_update_max_cdclk(i915); 16245 16246 /* Just disable it once at startup */ 16247 intel_vga_disable(i915); 16248 intel_setup_outputs(i915); 16249 16250 drm_modeset_lock_all(dev); 16251 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 16252 drm_modeset_unlock_all(dev); 16253 16254 for_each_intel_crtc(dev, crtc) { 16255 struct intel_initial_plane_config plane_config = {}; 16256 16257 if (!crtc->active) 16258 continue; 16259 16260 /* 16261 * Note that reserving the BIOS fb up front prevents us 16262 * from stuffing other stolen allocations like the ring 16263 * on top. This prevents some ugliness at boot time, and 16264 * can even allow for smooth boot transitions if the BIOS 16265 * fb is large enough for the active pipe configuration. 16266 */ 16267 i915->display.get_initial_plane_config(crtc, &plane_config); 16268 16269 /* 16270 * If the fb is shared between multiple heads, we'll 16271 * just get the first one. 16272 */ 16273 intel_find_initial_plane_obj(crtc, &plane_config); 16274 } 16275 16276 /* 16277 * Make sure hardware watermarks really match the state we read out. 16278 * Note that we need to do this after reconstructing the BIOS fb's 16279 * since the watermark calculation done here will use pstate->fb. 16280 */ 16281 if (!HAS_GMCH(i915)) 16282 sanitize_watermarks(dev); 16283 16284 /* 16285 * Force all active planes to recompute their states. So that on 16286 * mode_setcrtc after probe, all the intel_plane_state variables 16287 * are already calculated and there is no assert_plane warnings 16288 * during bootup. 
16289 */ 16290 ret = intel_initial_commit(dev); 16291 if (ret) 16292 DRM_DEBUG_KMS("Initial commit in probe failed.\n"); 16293 16294 return 0; 16295 } 16296 16297 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 16298 { 16299 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16300 /* 640x480@60Hz, ~25175 kHz */ 16301 struct dpll clock = { 16302 .m1 = 18, 16303 .m2 = 7, 16304 .p1 = 13, 16305 .p2 = 4, 16306 .n = 2, 16307 }; 16308 u32 dpll, fp; 16309 int i; 16310 16311 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154); 16312 16313 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 16314 pipe_name(pipe), clock.vco, clock.dot); 16315 16316 fp = i9xx_dpll_compute_fp(&clock); 16317 dpll = DPLL_DVO_2X_MODE | 16318 DPLL_VGA_MODE_DIS | 16319 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 16320 PLL_P2_DIVIDE_BY_4 | 16321 PLL_REF_INPUT_DREFCLK | 16322 DPLL_VCO_ENABLE; 16323 16324 I915_WRITE(FP0(pipe), fp); 16325 I915_WRITE(FP1(pipe), fp); 16326 16327 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); 16328 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); 16329 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); 16330 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); 16331 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); 16332 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); 16333 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); 16334 16335 /* 16336 * Apparently we need to have VGA mode enabled prior to changing 16337 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 16338 * dividers, even though the register value does change. 16339 */ 16340 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); 16341 I915_WRITE(DPLL(pipe), dpll); 16342 16343 /* Wait for the clocks to stabilize. */ 16344 POSTING_READ(DPLL(pipe)); 16345 udelay(150); 16346 16347 /* The pixel multiplier can only be updated once the 16348 * DPLL is enabled and the clocks are stable. 16349 * 16350 * So write it again. 
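 *
 * For reference, the dpll equations implemented by
 * i9xx_calc_dpll_params() give, for the fixed parameters above:
 *   m   = 5 * (m1 + 2) + (m2 + 2) = 5 * 20 + 9 = 109
 *   vco = refclk * m / (n + 2)    = 48000 * 109 / 4 = 1308000 kHz
 *   dot = vco / (p1 * p2)         = 1308000 / 52 = 25154 kHz (rounded)
 * which is why the WARN_ON() at the top of this function expects
 * 25154 rather than the nominal 25175 kHz.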
16351 */ 16352 I915_WRITE(DPLL(pipe), dpll); 16353 16354 /* We do this three times for luck */ 16355 for (i = 0; i < 3 ; i++) { 16356 I915_WRITE(DPLL(pipe), dpll); 16357 POSTING_READ(DPLL(pipe)); 16358 udelay(150); /* wait for warmup */ 16359 } 16360 16361 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE); 16362 POSTING_READ(PIPECONF(pipe)); 16363 16364 intel_wait_for_pipe_scanline_moving(crtc); 16365 } 16366 16367 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 16368 { 16369 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16370 16371 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", 16372 pipe_name(pipe)); 16373 16374 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE); 16375 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE); 16376 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE); 16377 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE); 16378 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE); 16379 16380 I915_WRITE(PIPECONF(pipe), 0); 16381 POSTING_READ(PIPECONF(pipe)); 16382 16383 intel_wait_for_pipe_scanline_stopped(crtc); 16384 16385 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); 16386 POSTING_READ(DPLL(pipe)); 16387 } 16388 16389 static void 16390 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) 16391 { 16392 struct intel_crtc *crtc; 16393 16394 if (INTEL_GEN(dev_priv) >= 4) 16395 return; 16396 16397 for_each_intel_crtc(&dev_priv->drm, crtc) { 16398 struct intel_plane *plane = 16399 to_intel_plane(crtc->base.primary); 16400 struct intel_crtc *plane_crtc; 16401 enum pipe pipe; 16402 16403 if (!plane->get_hw_state(plane, &pipe)) 16404 continue; 16405 16406 if (pipe == crtc->pipe) 16407 continue; 16408 16409 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", 16410 plane->base.base.id, plane->base.name); 16411 16412 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16413 intel_plane_disable_noatomic(plane_crtc, plane); 16414 } 16415 } 16416 16417 static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 16418 { 16419 struct drm_device *dev = crtc->base.dev; 16420 struct intel_encoder *encoder; 16421 16422 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 16423 return true; 16424 16425 return false; 16426 } 16427 16428 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) 16429 { 16430 struct drm_device *dev = encoder->base.dev; 16431 struct intel_connector *connector; 16432 16433 for_each_connector_on_encoder(dev, &encoder->base, connector) 16434 return connector; 16435 16436 return NULL; 16437 } 16438 16439 static bool has_pch_trancoder(struct drm_i915_private *dev_priv, 16440 enum pipe pch_transcoder) 16441 { 16442 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 16443 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A); 16444 } 16445 16446 static void intel_sanitize_crtc(struct intel_crtc *crtc, 16447 struct drm_modeset_acquire_ctx *ctx) 16448 { 16449 struct drm_device *dev = crtc->base.dev; 16450 struct drm_i915_private *dev_priv = to_i915(dev); 16451 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); 16452 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 16453 16454 /* Clear any frame start delays used for debugging left by the BIOS */ 16455 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) { 16456 i915_reg_t reg = PIPECONF(cpu_transcoder); 16457 16458 I915_WRITE(reg, 16459 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 16460 } 16461 16462 if 
(crtc_state->base.active) {
16463 struct intel_plane *plane;
16464
16465 /* Disable everything but the primary plane */
16466 for_each_intel_plane_on_crtc(dev, crtc, plane) {
16467 const struct intel_plane_state *plane_state =
16468 to_intel_plane_state(plane->base.state);
16469
16470 if (plane_state->base.visible &&
16471 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
16472 intel_plane_disable_noatomic(crtc, plane);
16473 }
16474
16475 /*
16476 * Disable any background color set by the BIOS, but enable the
16477 * gamma and CSC to match how we program our planes.
16478 */
16479 if (INTEL_GEN(dev_priv) >= 9)
16480 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
16481 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
16482 SKL_BOTTOM_COLOR_CSC_ENABLE);
16483 }
16484
16485 /* Adjust the state of the output pipe according to whether we
16486 * have active connectors/encoders. */
16487 if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
16488 intel_crtc_disable_noatomic(&crtc->base, ctx);
16489
16490 if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
16491 /*
16492 * We start out with underrun reporting disabled to avoid races.
16493 * For correct bookkeeping, mark this on active crtcs.
16494 *
16495 * Also, on gmch platforms we don't have any hardware bits to
16496 * disable the underrun reporting, which means we need to start
16497 * out with underrun reporting disabled also on inactive pipes,
16498 * since otherwise we'll complain about the garbage we read when
16499 * e.g. coming up after runtime pm.
16500 *
16501 * No protection against concurrent access is required - at
16502 * worst a fifo underrun happens which also sets this to false.
16503 */
16504 crtc->cpu_fifo_underrun_disabled = true;
16505 /*
16506 * We track the PCH transcoder underrun reporting state
16507 * within the crtc: the crtc for pipe A houses the underrun
16508 * reporting state for PCH transcoder A, the crtc for pipe B
16509 * houses it for PCH transcoder B, etc. LPT-H has only PCH
16510 * transcoder A, and marking underrun reporting as disabled for
16511 * the non-existing PCH transcoders B and C would prevent
16512 * enabling the south error interrupt (see cpt_can_enable_serr_int()).
16513 */
16514 if (has_pch_trancoder(dev_priv, crtc->pipe))
16515 crtc->pch_fifo_underrun_disabled = true;
16516 }
16517 }
16518
16519 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
16520 {
16521 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
16522
16523 /*
16524 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
16525 * the hardware when a high res display is plugged in. The DPLL P
16526 * divider is zero, and the pipe timings are bonkers. We'll
16527 * try to disable everything in that case.
16528 *
16529 * FIXME would be nice to be able to sanitize this state
16530 * without several WARNs, but for now let's take the easy
16531 * road.
16532 */
16533 return IS_GEN(dev_priv, 6) &&
16534 crtc_state->base.active &&
16535 crtc_state->shared_dpll &&
16536 crtc_state->port_clock == 0;
16537 }
16538
16539 static void intel_sanitize_encoder(struct intel_encoder *encoder)
16540 {
16541 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
16542 struct intel_connector *connector;
16543 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
16544 struct intel_crtc_state *crtc_state = crtc ?
16545 to_intel_crtc_state(crtc->base.state) : NULL;
16546
16547 /* We need to check both for a crtc link (meaning that the
16548 * encoder is active and trying to read from a pipe) and the
16549 * pipe itself being active. */
16550 bool has_active_crtc = crtc_state &&
16551 crtc_state->base.active;
16552
16553 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
16554 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
16555 pipe_name(crtc->pipe));
16556 has_active_crtc = false;
16557 }
16558
16559 connector = intel_encoder_find_connector(encoder);
16560 if (connector && !has_active_crtc) {
16561 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
16562 encoder->base.base.id,
16563 encoder->base.name);
16564
16565 /* The connector is active but has no active pipe. This is
16566 * fallout from our resume register restoring. Disable
16567 * the encoder manually again. */
16568 if (crtc_state) {
16569 struct drm_encoder *best_encoder;
16570
16571 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
16572 encoder->base.base.id,
16573 encoder->base.name);
16574
16575 /* avoid oopsing in case the hooks consult best_encoder */
16576 best_encoder = connector->base.state->best_encoder;
16577 connector->base.state->best_encoder = &encoder->base;
16578
16579 if (encoder->disable)
16580 encoder->disable(encoder, crtc_state,
16581 connector->base.state);
16582 if (encoder->post_disable)
16583 encoder->post_disable(encoder, crtc_state,
16584 connector->base.state);
16585
16586 connector->base.state->best_encoder = best_encoder;
16587 }
16588 encoder->base.crtc = NULL;
16589
16590 /* Inconsistent output/port/pipe state happens presumably due to
16591 * a bug in one of the get_hw_state functions, or someplace else
16592 * in our code, like the register restore mess on resume. Clamp
16593 * things to off as a safer default.
*/ 16594 16595 connector->base.dpms = DRM_MODE_DPMS_OFF; 16596 connector->base.encoder = NULL; 16597 } 16598 16599 /* notify opregion of the sanitized encoder state */ 16600 intel_opregion_notify_encoder(encoder, connector && has_active_crtc); 16601 16602 if (INTEL_GEN(dev_priv) >= 11) 16603 icl_sanitize_encoder_pll_mapping(encoder); 16604 } 16605 16606 /* FIXME read out full plane state for all planes */ 16607 static void readout_plane_state(struct drm_i915_private *dev_priv) 16608 { 16609 struct intel_plane *plane; 16610 struct intel_crtc *crtc; 16611 16612 for_each_intel_plane(&dev_priv->drm, plane) { 16613 struct intel_plane_state *plane_state = 16614 to_intel_plane_state(plane->base.state); 16615 struct intel_crtc_state *crtc_state; 16616 enum pipe pipe = PIPE_A; 16617 bool visible; 16618 16619 visible = plane->get_hw_state(plane, &pipe); 16620 16621 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16622 crtc_state = to_intel_crtc_state(crtc->base.state); 16623 16624 intel_set_plane_visible(crtc_state, plane_state, visible); 16625 16626 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n", 16627 plane->base.base.id, plane->base.name, 16628 enableddisabled(visible), pipe_name(pipe)); 16629 } 16630 16631 for_each_intel_crtc(&dev_priv->drm, crtc) { 16632 struct intel_crtc_state *crtc_state = 16633 to_intel_crtc_state(crtc->base.state); 16634 16635 fixup_active_planes(crtc_state); 16636 } 16637 } 16638 16639 static void intel_modeset_readout_hw_state(struct drm_device *dev) 16640 { 16641 struct drm_i915_private *dev_priv = to_i915(dev); 16642 enum pipe pipe; 16643 struct intel_crtc *crtc; 16644 struct intel_encoder *encoder; 16645 struct intel_connector *connector; 16646 struct drm_connector_list_iter conn_iter; 16647 int i; 16648 16649 dev_priv->active_pipes = 0; 16650 16651 for_each_intel_crtc(dev, crtc) { 16652 struct intel_crtc_state *crtc_state = 16653 to_intel_crtc_state(crtc->base.state); 16654 16655 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); 16656 memset(crtc_state, 0, sizeof(*crtc_state)); 16657 __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base); 16658 16659 crtc_state->base.active = crtc_state->base.enable = 16660 dev_priv->display.get_pipe_config(crtc, crtc_state); 16661 16662 crtc->base.enabled = crtc_state->base.enable; 16663 crtc->active = crtc_state->base.active; 16664 16665 if (crtc_state->base.active) 16666 dev_priv->active_pipes |= BIT(crtc->pipe); 16667 16668 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", 16669 crtc->base.base.id, crtc->base.name, 16670 enableddisabled(crtc_state->base.active)); 16671 } 16672 16673 readout_plane_state(dev_priv); 16674 16675 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 16676 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 16677 16678 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll, 16679 &pll->state.hw_state); 16680 16681 if (IS_ELKHARTLAKE(dev_priv) && pll->on && 16682 pll->info->id == DPLL_ID_EHL_DPLL4) { 16683 pll->wakeref = intel_display_power_get(dev_priv, 16684 POWER_DOMAIN_DPLL_DC_OFF); 16685 } 16686 16687 pll->state.crtc_mask = 0; 16688 for_each_intel_crtc(dev, crtc) { 16689 struct intel_crtc_state *crtc_state = 16690 to_intel_crtc_state(crtc->base.state); 16691 16692 if (crtc_state->base.active && 16693 crtc_state->shared_dpll == pll) 16694 pll->state.crtc_mask |= 1 << crtc->pipe; 16695 } 16696 pll->active_mask = pll->state.crtc_mask; 16697 16698 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 16699 pll->info->name, pll->state.crtc_mask, pll->on); 
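/*
 * crtc_mask is a bitmask of pipes: e.g. if the readout above
 * found pipes A and B using this pll, crtc_mask ends up as
 * BIT(PIPE_A) | BIT(PIPE_B) = 0x3.
 */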
16700 } 16701 16702 for_each_intel_encoder(dev, encoder) { 16703 pipe = 0; 16704 16705 if (encoder->get_hw_state(encoder, &pipe)) { 16706 struct intel_crtc_state *crtc_state; 16707 16708 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 16709 crtc_state = to_intel_crtc_state(crtc->base.state); 16710 16711 encoder->base.crtc = &crtc->base; 16712 encoder->get_config(encoder, crtc_state); 16713 } else { 16714 encoder->base.crtc = NULL; 16715 } 16716 16717 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 16718 encoder->base.base.id, encoder->base.name, 16719 enableddisabled(encoder->base.crtc), 16720 pipe_name(pipe)); 16721 } 16722 16723 drm_connector_list_iter_begin(dev, &conn_iter); 16724 for_each_intel_connector_iter(connector, &conn_iter) { 16725 if (connector->get_hw_state(connector)) { 16726 connector->base.dpms = DRM_MODE_DPMS_ON; 16727 16728 encoder = connector->encoder; 16729 connector->base.encoder = &encoder->base; 16730 16731 if (encoder->base.crtc && 16732 encoder->base.crtc->state->active) { 16733 /* 16734 * This has to be done during hardware readout 16735 * because anything calling .crtc_disable may 16736 * rely on the connector_mask being accurate. 16737 */ 16738 encoder->base.crtc->state->connector_mask |= 16739 drm_connector_mask(&connector->base); 16740 encoder->base.crtc->state->encoder_mask |= 16741 drm_encoder_mask(&encoder->base); 16742 } 16743 16744 } else { 16745 connector->base.dpms = DRM_MODE_DPMS_OFF; 16746 connector->base.encoder = NULL; 16747 } 16748 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 16749 connector->base.base.id, connector->base.name, 16750 enableddisabled(connector->base.encoder)); 16751 } 16752 drm_connector_list_iter_end(&conn_iter); 16753 16754 for_each_intel_crtc(dev, crtc) { 16755 struct intel_bw_state *bw_state = 16756 to_intel_bw_state(dev_priv->bw_obj.state); 16757 struct intel_crtc_state *crtc_state = 16758 to_intel_crtc_state(crtc->base.state); 16759 struct intel_plane *plane; 16760 int min_cdclk = 0; 16761 16762 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 16763 if (crtc_state->base.active) { 16764 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state); 16765 crtc->base.mode.hdisplay = crtc_state->pipe_src_w; 16766 crtc->base.mode.vdisplay = crtc_state->pipe_src_h; 16767 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state); 16768 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); 16769 16770 /* 16771 * The initial mode needs to be set in order to keep 16772 * the atomic core happy. It wants a valid mode if the 16773 * crtc's enabled, so we do the above call. 16774 * 16775 * But we don't set all the derived state fully, hence 16776 * set a flag to indicate that a full recalculation is 16777 * needed on the next commit. 
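 * (The flag in question is I915_MODE_FLAG_INHERITED, written
 * into mode.private_flags just below.)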
16778 */
16779 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
16780
16781 intel_crtc_compute_pixel_rate(crtc_state);
16782
16783 if (dev_priv->display.modeset_calc_cdclk) {
16784 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
16785 if (WARN_ON(min_cdclk < 0))
16786 min_cdclk = 0;
16787 }
16788
16789 drm_calc_timestamping_constants(&crtc->base,
16790 &crtc_state->base.adjusted_mode);
16791 update_scanline_offset(crtc_state);
16792 }
16793
16794 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
16795 dev_priv->min_voltage_level[crtc->pipe] =
16796 crtc_state->min_voltage_level;
16797
16798 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
16799 const struct intel_plane_state *plane_state =
16800 to_intel_plane_state(plane->base.state);
16801
16802 /*
16803 * FIXME don't have the fb yet, so can't
16804 * use intel_plane_data_rate() :(
16805 */
16806 if (plane_state->base.visible)
16807 crtc_state->data_rate[plane->id] =
16808 4 * crtc_state->pixel_rate;
16809 }
16810
16811 intel_bw_crtc_update(bw_state, crtc_state);
16812
16813 intel_pipe_config_sanity_check(dev_priv, crtc_state);
16814 }
16815 }
16816
16817 static void
16818 get_encoder_power_domains(struct drm_i915_private *dev_priv)
16819 {
16820 struct intel_encoder *encoder;
16821
16822 for_each_intel_encoder(&dev_priv->drm, encoder) {
16823 struct intel_crtc_state *crtc_state;
16824
16825 if (!encoder->get_power_domains)
16826 continue;
16827
16828 /*
16829 * MST-primary and inactive encoders don't have a crtc state
16830 * and neither of these requires any power domain references.
16831 */
16832 if (!encoder->base.crtc)
16833 continue;
16834
16835 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16836 encoder->get_power_domains(encoder, crtc_state);
16837 }
16838 }
16839
16840 static void intel_early_display_was(struct drm_i915_private *dev_priv)
16841 {
16842 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16843 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
16844 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
16845 DARBF_GATING_DIS);
16846
16847 if (IS_HASWELL(dev_priv)) {
16848 /*
16849 * WaRsPkgCStateDisplayPMReq:hsw
16850 * The system hangs if this isn't done before disabling all planes!
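 * The write below sets FORCE_ARB_IDLE_PLANES in CHICKEN_PAR1_1
 * while the BIOS-programmed planes are still active.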
16851 */
16852 I915_WRITE(CHICKEN_PAR1_1,
16853 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
16854 }
16855 }
16856
16857 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16858 enum port port, i915_reg_t hdmi_reg)
16859 {
16860 u32 val = I915_READ(hdmi_reg);
16861
16862 if (val & SDVO_ENABLE ||
16863 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16864 return;
16865
16866 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16867 port_name(port));
16868
16869 val &= ~SDVO_PIPE_SEL_MASK;
16870 val |= SDVO_PIPE_SEL(PIPE_A);
16871
16872 I915_WRITE(hdmi_reg, val);
16873 }
16874
16875 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16876 enum port port, i915_reg_t dp_reg)
16877 {
16878 u32 val = I915_READ(dp_reg);
16879
16880 if (val & DP_PORT_EN ||
16881 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16882 return;
16883
16884 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16885 port_name(port));
16886
16887 val &= ~DP_PIPE_SEL_MASK;
16888 val |= DP_PIPE_SEL(PIPE_A);
16889
16890 I915_WRITE(dp_reg, val);
16891 }
16892
16893 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
16894 {
16895 /*
16896 * The BIOS may select transcoder B on some of the PCH
16897 * ports even if it doesn't enable the port. This would trip
16898 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
16899 * Sanitize the transcoder select bits to prevent that. We
16900 * assume that the BIOS never actually enabled the port,
16901 * because if it did we'd actually have to toggle the port
16902 * on and back off to make the transcoder A select stick
16903 * (see intel_dp_link_down(), intel_disable_hdmi(),
16904 * intel_disable_sdvo()).
16905 */
16906 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
16907 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
16908 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
16909
16910 /* PCH SDVOB multiplexes with HDMIB */
16911 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
16912 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
16913 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
16914 }
16915
16916 /* Scan out the current hw modeset state and sanitize it so
16917 * that the software state is consistent with the hardware.
16918 */
16919 static void
16920 intel_modeset_setup_hw_state(struct drm_device *dev,
16921 struct drm_modeset_acquire_ctx *ctx)
16922 {
16923 struct drm_i915_private *dev_priv = to_i915(dev);
16924 struct intel_crtc_state *crtc_state;
16925 struct intel_encoder *encoder;
16926 struct intel_crtc *crtc;
16927 intel_wakeref_t wakeref;
16928 int i;
16929
16930 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
16931
16932 intel_early_display_was(dev_priv);
16933 intel_modeset_readout_hw_state(dev);
16934
16935 /* HW state is read out; now we need to sanitize this mess. */
16936
16937 /* Sanitize the TypeC port mode upfront; encoders depend on this */
16938 for_each_intel_encoder(dev, encoder) {
16939 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
16940
16941 /* We need to sanitize only the MST primary port. */
16942 if (encoder->type != INTEL_OUTPUT_DP_MST &&
16943 intel_phy_is_tc(dev_priv, phy))
16944 intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
16945 }
16946
16947 get_encoder_power_domains(dev_priv);
16948
16949 if (HAS_PCH_IBX(dev_priv))
16950 ibx_sanitize_pch_ports(dev_priv);
16951
16952 /*
16953 * intel_sanitize_plane_mapping() may need to do vblank
16954 * waits, so we need vblank interrupts restored beforehand.
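 * The loop below resets the drm vblank bookkeeping for each
 * crtc and re-enables vblanks on the crtcs read out as active.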
16955 */ 16956 for_each_intel_crtc(&dev_priv->drm, crtc) { 16957 crtc_state = to_intel_crtc_state(crtc->base.state); 16958 16959 drm_crtc_vblank_reset(&crtc->base); 16960 16961 if (crtc_state->base.active) 16962 intel_crtc_vblank_on(crtc_state); 16963 } 16964 16965 intel_sanitize_plane_mapping(dev_priv); 16966 16967 for_each_intel_encoder(dev, encoder) 16968 intel_sanitize_encoder(encoder); 16969 16970 for_each_intel_crtc(&dev_priv->drm, crtc) { 16971 crtc_state = to_intel_crtc_state(crtc->base.state); 16972 intel_sanitize_crtc(crtc, ctx); 16973 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]"); 16974 } 16975 16976 intel_modeset_update_connector_atomic_state(dev); 16977 16978 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 16979 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 16980 16981 if (!pll->on || pll->active_mask) 16982 continue; 16983 16984 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", 16985 pll->info->name); 16986 16987 pll->info->funcs->disable(dev_priv, pll); 16988 pll->on = false; 16989 } 16990 16991 if (IS_G4X(dev_priv)) { 16992 g4x_wm_get_hw_state(dev_priv); 16993 g4x_wm_sanitize(dev_priv); 16994 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 16995 vlv_wm_get_hw_state(dev_priv); 16996 vlv_wm_sanitize(dev_priv); 16997 } else if (INTEL_GEN(dev_priv) >= 9) { 16998 skl_wm_get_hw_state(dev_priv); 16999 } else if (HAS_PCH_SPLIT(dev_priv)) { 17000 ilk_wm_get_hw_state(dev_priv); 17001 } 17002 17003 for_each_intel_crtc(dev, crtc) { 17004 u64 put_domains; 17005 17006 crtc_state = to_intel_crtc_state(crtc->base.state); 17007 put_domains = modeset_get_crtc_power_domains(crtc_state); 17008 if (WARN_ON(put_domains)) 17009 modeset_put_power_domains(dev_priv, put_domains); 17010 } 17011 17012 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); 17013 17014 intel_fbc_init_pipe_state(dev_priv); 17015 } 17016 17017 void intel_display_resume(struct drm_device *dev) 17018 { 17019 struct drm_i915_private *dev_priv = to_i915(dev); 17020 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 17021 struct drm_modeset_acquire_ctx ctx; 17022 int ret; 17023 17024 dev_priv->modeset_restore_state = NULL; 17025 if (state) 17026 state->acquire_ctx = &ctx; 17027 17028 drm_modeset_acquire_init(&ctx, 0); 17029 17030 while (1) { 17031 ret = drm_modeset_lock_all_ctx(dev, &ctx); 17032 if (ret != -EDEADLK) 17033 break; 17034 17035 drm_modeset_backoff(&ctx); 17036 } 17037 17038 if (!ret) 17039 ret = __intel_display_resume(dev, state, &ctx); 17040 17041 intel_enable_ipc(dev_priv); 17042 drm_modeset_drop_locks(&ctx); 17043 drm_modeset_acquire_fini(&ctx); 17044 17045 if (ret) 17046 DRM_ERROR("Restoring old state failed with %i\n", ret); 17047 if (state) 17048 drm_atomic_state_put(state); 17049 } 17050 17051 static void intel_hpd_poll_fini(struct drm_i915_private *i915) 17052 { 17053 struct intel_connector *connector; 17054 struct drm_connector_list_iter conn_iter; 17055 17056 /* Kill all the work that may have been queued by hpd. 
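 * Both the modeset retry work and the hdcp work items are
 * connector-private, hence the connector iteration below.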
*/
17057 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
17058 for_each_intel_connector_iter(connector, &conn_iter) {
17059 if (connector->modeset_retry_work.func)
17060 cancel_work_sync(&connector->modeset_retry_work);
17061 if (connector->hdcp.shim) {
17062 cancel_delayed_work_sync(&connector->hdcp.check_work);
17063 cancel_work_sync(&connector->hdcp.prop_work);
17064 }
17065 }
17066 drm_connector_list_iter_end(&conn_iter);
17067 }
17068
17069 void intel_modeset_driver_remove(struct drm_i915_private *i915)
17070 {
17071 flush_workqueue(i915->flip_wq);
17072 flush_workqueue(i915->modeset_wq);
17073
17074 flush_work(&i915->atomic_helper.free_work);
17075 WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
17076
17077 /*
17078 * Shut down interrupts and polling as the first thing to avoid
17079 * creating havoc. Too much stuff here (turning off connectors, ...)
17080 * would experience fancy races otherwise.
17081 */
17082 intel_irq_uninstall(i915);
17083
17084 /*
17085 * Due to the hpd irq storm handling the hotplug work can re-arm the
17086 * poll handlers. Hence disable polling after hpd handling is shut down.
17087 */
17088 intel_hpd_poll_fini(i915);
17089
17090 /* poll work can call into fbdev, hence clean that up afterwards */
17091 intel_fbdev_fini(i915);
17092
17093 intel_unregister_dsm_handler();
17094
17095 intel_fbc_global_disable(i915);
17096
17097 /* flush any delayed tasks or pending work */
17098 flush_scheduled_work();
17099
17100 intel_hdcp_component_fini(i915);
17101
17102 drm_mode_config_cleanup(&i915->drm);
17103
17104 intel_overlay_cleanup(i915);
17105
17106 intel_gmbus_teardown(i915);
17107
17108 destroy_workqueue(i915->flip_wq);
17109 destroy_workqueue(i915->modeset_wq);
17110
17111 intel_fbc_cleanup_cfb(i915);
17112 }
17113
17114 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
17115
17116 struct intel_display_error_state {
17117
17118 u32 power_well_driver;
17119
17120 struct intel_cursor_error_state {
17121 u32 control;
17122 u32 position;
17123 u32 base;
17124 u32 size;
17125 } cursor[I915_MAX_PIPES];
17126
17127 struct intel_pipe_error_state {
17128 bool power_domain_on;
17129 u32 source;
17130 u32 stat;
17131 } pipe[I915_MAX_PIPES];
17132
17133 struct intel_plane_error_state {
17134 u32 control;
17135 u32 stride;
17136 u32 size;
17137 u32 pos;
17138 u32 addr;
17139 u32 surface;
17140 u32 tile_offset;
17141 } plane[I915_MAX_PIPES];
17142
17143 struct intel_transcoder_error_state {
17144 bool available;
17145 bool power_domain_on;
17146 enum transcoder cpu_transcoder;
17147
17148 u32 conf;
17149
17150 u32 htotal;
17151 u32 hblank;
17152 u32 hsync;
17153 u32 vtotal;
17154 u32 vblank;
17155 u32 vsync;
17156 } transcoder[5];
17157 };
17158
17159 struct intel_display_error_state *
17160 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
17161 {
17162 struct intel_display_error_state *error;
17163 int transcoders[] = {
17164 TRANSCODER_A,
17165 TRANSCODER_B,
17166 TRANSCODER_C,
17167 TRANSCODER_D,
17168 TRANSCODER_EDP,
17169 };
17170 int i;
17171
17172 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
17173
17174 if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
17175 return NULL;
17176
17177 error = kzalloc(sizeof(*error), GFP_ATOMIC);
17178 if (error == NULL)
17179 return NULL;
17180
17181 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
17182 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
17183
17184 for_each_pipe(dev_priv, i) {
17185 error->pipe[i].power_domain_on =
__intel_display_power_is_enabled(dev_priv, 17187 POWER_DOMAIN_PIPE(i)); 17188 if (!error->pipe[i].power_domain_on) 17189 continue; 17190 17191 error->cursor[i].control = I915_READ(CURCNTR(i)); 17192 error->cursor[i].position = I915_READ(CURPOS(i)); 17193 error->cursor[i].base = I915_READ(CURBASE(i)); 17194 17195 error->plane[i].control = I915_READ(DSPCNTR(i)); 17196 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 17197 if (INTEL_GEN(dev_priv) <= 3) { 17198 error->plane[i].size = I915_READ(DSPSIZE(i)); 17199 error->plane[i].pos = I915_READ(DSPPOS(i)); 17200 } 17201 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 17202 error->plane[i].addr = I915_READ(DSPADDR(i)); 17203 if (INTEL_GEN(dev_priv) >= 4) { 17204 error->plane[i].surface = I915_READ(DSPSURF(i)); 17205 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 17206 } 17207 17208 error->pipe[i].source = I915_READ(PIPESRC(i)); 17209 17210 if (HAS_GMCH(dev_priv)) 17211 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 17212 } 17213 17214 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { 17215 enum transcoder cpu_transcoder = transcoders[i]; 17216 17217 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder]) 17218 continue; 17219 17220 error->transcoder[i].available = true; 17221 error->transcoder[i].power_domain_on = 17222 __intel_display_power_is_enabled(dev_priv, 17223 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 17224 if (!error->transcoder[i].power_domain_on) 17225 continue; 17226 17227 error->transcoder[i].cpu_transcoder = cpu_transcoder; 17228 17229 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 17230 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 17231 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 17232 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 17233 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 17234 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 17235 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 17236 } 17237 17238 return error; 17239 } 17240 17241 #define err_printf(e, ...) 
i915_error_printf(e, __VA_ARGS__) 17242 17243 void 17244 intel_display_print_error_state(struct drm_i915_error_state_buf *m, 17245 struct intel_display_error_state *error) 17246 { 17247 struct drm_i915_private *dev_priv = m->i915; 17248 int i; 17249 17250 if (!error) 17251 return; 17252 17253 err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv)); 17254 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 17255 err_printf(m, "PWR_WELL_CTL2: %08x\n", 17256 error->power_well_driver); 17257 for_each_pipe(dev_priv, i) { 17258 err_printf(m, "Pipe [%d]:\n", i); 17259 err_printf(m, " Power: %s\n", 17260 onoff(error->pipe[i].power_domain_on)); 17261 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 17262 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 17263 17264 err_printf(m, "Plane [%d]:\n", i); 17265 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 17266 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 17267 if (INTEL_GEN(dev_priv) <= 3) { 17268 err_printf(m, " SIZE: %08x\n", error->plane[i].size); 17269 err_printf(m, " POS: %08x\n", error->plane[i].pos); 17270 } 17271 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 17272 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 17273 if (INTEL_GEN(dev_priv) >= 4) { 17274 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 17275 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 17276 } 17277 17278 err_printf(m, "Cursor [%d]:\n", i); 17279 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 17280 err_printf(m, " POS: %08x\n", error->cursor[i].position); 17281 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 17282 } 17283 17284 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { 17285 if (!error->transcoder[i].available) 17286 continue; 17287 17288 err_printf(m, "CPU transcoder: %s\n", 17289 transcoder_name(error->transcoder[i].cpu_transcoder)); 17290 err_printf(m, " Power: %s\n", 17291 onoff(error->transcoder[i].power_domain_on)); 17292 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 17293 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 17294 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 17295 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 17296 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 17297 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 17298 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 17299 } 17300 } 17301 17302 #endif 17303
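
/*
 * Illustrative usage sketch (callers outside this file are assumptions):
 * the GPU error capture code is expected to drive the pair above roughly
 * like so, capturing in atomic context (hence the GFP_ATOMIC allocation
 * above) and printing later when the error state is dumped:
 *
 *	struct intel_display_error_state *display;
 *
 *	display = intel_display_capture_error_state(i915);
 *	...
 *	intel_display_print_error_state(m, display);
 *	kfree(display);
 *
 * intel_display_print_error_state() bails out early when passed NULL,
 * so the capture result can be handed over unconditionally.
 */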