/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		    CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
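
/*
 * Illustrative numbers only (not from any spec): with a 1600 MHz HPLL
 * VCO (ref_freq = 1600000 kHz) and a CCK divider field of 7, the
 * formula above gives DIV_ROUND_CLOSEST(1600000 * 2, 7 + 1) =
 * 400000 kHz, i.e. a 400 MHz clock.
 */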

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
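
/*
 * Note on the .p2 entries in these limit tables: i9xx_select_p2_div()
 * further below picks p2_slow for target dot clocks under dot_limit
 * and p2_fast at or above it, except for LVDS, where the choice
 * instead follows the current single (p2_slow) vs. dual channel
 * (p2_fast) link configuration.
 */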

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
		crtc_state->sync_mode_slaves_mask);
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
		crtc_state->sync_mode_slaves_mask);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
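
/*
 * Worked example for i9xx_calc_dpll_params() above (illustrative
 * values only): with a 96 MHz refclk, m1 = 12 and m2 = 5 give
 * m = 5 * (12 + 2) + (5 + 2) = 77. Choosing n = 2 puts the VCO at
 * 96000 * 77 / (2 + 2) = 1848000 kHz, and p1 = 2, p2 = 10 (p = 20)
 * then yields a 1848000 / 20 = 92400 kHz dot clock, inside the i9xx
 * SDVO limits above.
 */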

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
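
/*
 * The helper below scores candidate dividers by their deviation in
 * parts per million: 1000000 * |target - dot| / target. As a made-up
 * example, target = 100000 kHz and dot = 100100 kHz is an error of
 * 1000 ppm; errors under 100 ppm count as "good enough", in which
 * case a larger P divider wins instead.
 */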

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
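
/*
 * Both the VLV and CHV searches operate on the "fast clock", i.e. 5x
 * the pipe dot clock (hence the target *= 5 above and the / 5 in the
 * *_calc_dpll_params() helpers). On CHV, m2 additionally carries 22
 * fractional bits, which is why the CHV/BXT limit tables shift their
 * m2 bounds by << 22 and chv_find_best_dpll() below scales its m2
 * computation the same way.
 */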

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 always
	 * to 2. If we need to support a 200 MHz refclk, we will have to
	 * revisit this because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
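
/*
 * Note: gen4+ has a pipe state bit in PIPECONF that can be polled
 * directly (see intel_wait_for_pipe_off() below); older platforms
 * lack it, so pipe activity is instead inferred from PIPEDSL by
 * sampling the scanline twice, 5 ms apart, and checking whether it
 * moved (see pipe_scanline_is_moving() above).
 */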

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE),
			"FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
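
/*
 * The panel power sequencer write-protects panel-related registers
 * while the panel is powered on. The check below treats the registers
 * as unlocked if the panel is off or if the unlock magic is latched
 * in PP_CONTROL; callers assert this before writing a panel-protected
 * PLL (see e.g. vlv_enable_pll()).
 */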
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}
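
/*
 * assert_pipe() below intentionally uses
 * intel_display_power_get_if_enabled(): an assert must not wake up a
 * powered-down power well, so when the transcoder's power domain is
 * off the pipe is simply reported as disabled.
 */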
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/*
		 * The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}

static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (INTEL_GEN(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}

static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}

void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}

static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
1851 */ 1852 if (HAS_GMCH(dev_priv)) { 1853 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) 1854 assert_dsi_pll_enabled(dev_priv); 1855 else 1856 assert_pll_enabled(dev_priv, pipe); 1857 } else { 1858 if (new_crtc_state->has_pch_encoder) { 1859 /* if driving the PCH, we need FDI enabled */ 1860 assert_fdi_rx_pll_enabled(dev_priv, 1861 intel_crtc_pch_transcoder(crtc)); 1862 assert_fdi_tx_pll_enabled(dev_priv, 1863 (enum pipe) cpu_transcoder); 1864 } 1865 /* FIXME: assert CPU port conditions for SNB+ */ 1866 } 1867 1868 trace_intel_pipe_enable(crtc); 1869 1870 reg = PIPECONF(cpu_transcoder); 1871 val = I915_READ(reg); 1872 if (val & PIPECONF_ENABLE) { 1873 /* we keep both pipes enabled on 830 */ 1874 WARN_ON(!IS_I830(dev_priv)); 1875 return; 1876 } 1877 1878 I915_WRITE(reg, val | PIPECONF_ENABLE); 1879 POSTING_READ(reg); 1880 1881 /* 1882 * Until the pipe starts, PIPEDSL reads will return a stale value, 1883 * which causes an apparent vblank timestamp jump when PIPEDSL 1884 * resets to its proper value. That also messes up the frame count 1885 * when it's derived from the timestamps. So let's wait for the 1886 * pipe to start properly before we call drm_crtc_vblank_on(). 1887 */ 1888 if (intel_crtc_max_vblank_count(new_crtc_state) == 0) 1889 intel_wait_for_pipe_scanline_moving(crtc); 1890 } 1891 1892 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) 1893 { 1894 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 1895 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1896 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 1897 enum pipe pipe = crtc->pipe; 1898 i915_reg_t reg; 1899 u32 val; 1900 1901 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 1902 1903 /* 1904 * Make sure planes won't keep trying to pump pixels to us, 1905 * or we might hang the display. 1906 */ 1907 assert_planes_disabled(crtc); 1908 1909 trace_intel_pipe_disable(crtc); 1910 1911 reg = PIPECONF(cpu_transcoder); 1912 val = I915_READ(reg); 1913 if ((val & PIPECONF_ENABLE) == 0) 1914 return; 1915 1916 /* 1917 * Double wide has implications for planes, 1918 * so it's best to keep it disabled when not needed. 1919 */ 1920 if (old_crtc_state->double_wide) 1921 val &= ~PIPECONF_DOUBLE_WIDE; 1922 1923 /* Don't disable the pipe or pipe PLLs if they're still needed (i830) */ 1924 if (!IS_I830(dev_priv)) 1925 val &= ~PIPECONF_ENABLE; 1926 1927 I915_WRITE(reg, val); 1928 if ((val & PIPECONF_ENABLE) == 0) 1929 intel_wait_for_pipe_off(old_crtc_state); 1930 } 1931 1932 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 1933 { 1934 return IS_GEN(dev_priv, 2) ?
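/* 2 KiB tiles on gen2, 4 KiB on everything newer */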
2048 : 4096; 1935 } 1936 1937 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) 1938 { 1939 if (!is_ccs_modifier(fb->modifier)) 1940 return false; 1941 1942 return plane >= fb->format->num_planes / 2; 1943 } 1944 1945 static bool is_gen12_ccs_modifier(u64 modifier) 1946 { 1947 return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS; 1948 } 1949 1950 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) 1951 { 1952 return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane); 1953 } 1954 1955 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane) 1956 { 1957 if (is_ccs_modifier(fb->modifier)) 1958 return is_ccs_plane(fb, plane); 1959 1960 return plane == 1; 1961 } 1962 1963 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) 1964 { 1965 WARN_ON(!is_ccs_modifier(fb->modifier) || 1966 (main_plane && main_plane >= fb->format->num_planes / 2)); 1967 1968 return fb->format->num_planes / 2 + main_plane; 1969 } 1970 1971 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) 1972 { 1973 WARN_ON(!is_ccs_modifier(fb->modifier) || 1974 ccs_plane < fb->format->num_planes / 2); 1975 1976 return ccs_plane - fb->format->num_planes / 2; 1977 } 1978 1979 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */ 1980 static int 1981 intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) 1982 { 1983 if (is_ccs_modifier(fb->modifier)) 1984 return main_to_ccs_plane(fb, main_plane); 1985 1986 return 1; 1987 } 1988 1989 bool 1990 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, 1991 uint64_t modifier) 1992 { 1993 return info->is_yuv && 1994 info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2); 1995 } 1996 1997 static unsigned int 1998 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) 1999 { 2000 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2001 unsigned int cpp = fb->format->cpp[color_plane]; 2002 2003 switch (fb->modifier) { 2004 case DRM_FORMAT_MOD_LINEAR: 2005 return intel_tile_size(dev_priv); 2006 case I915_FORMAT_MOD_X_TILED: 2007 if (IS_GEN(dev_priv, 2)) 2008 return 128; 2009 else 2010 return 512; 2011 case I915_FORMAT_MOD_Y_TILED_CCS: 2012 if (is_ccs_plane(fb, color_plane)) 2013 return 128; 2014 /* fall through */ 2015 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2016 if (is_ccs_plane(fb, color_plane)) 2017 return 64; 2018 /* fall through */ 2019 case I915_FORMAT_MOD_Y_TILED: 2020 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv)) 2021 return 128; 2022 else 2023 return 512; 2024 case I915_FORMAT_MOD_Yf_TILED_CCS: 2025 if (is_ccs_plane(fb, color_plane)) 2026 return 128; 2027 /* fall through */ 2028 case I915_FORMAT_MOD_Yf_TILED: 2029 switch (cpp) { 2030 case 1: 2031 return 64; 2032 case 2: 2033 case 4: 2034 return 128; 2035 case 8: 2036 case 16: 2037 return 256; 2038 default: 2039 MISSING_CASE(cpp); 2040 return cpp; 2041 } 2042 break; 2043 default: 2044 MISSING_CASE(fb->modifier); 2045 return cpp; 2046 } 2047 } 2048 2049 static unsigned int 2050 intel_tile_height(const struct drm_framebuffer *fb, int color_plane) 2051 { 2052 if (is_gen12_ccs_plane(fb, color_plane)) 2053 return 1; 2054 2055 return intel_tile_size(to_i915(fb->dev)) / 2056 intel_tile_width_bytes(fb, color_plane); 2057 } 2058 2059 /* Return the tile dimensions in pixel units */ 2060 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, 2061 unsigned int *tile_width, 2062 unsigned int *tile_height) 2063 { 
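/*
 * Illustrative example, derived from the helpers above: an X-tiled
 * plane on gen3+ uses 512 byte wide tiles, so with cpp == 4 this
 * yields *tile_width = 512 / 4 = 128 pixels and *tile_height =
 * 4096 / 512 = 8 rows.
 */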
2064 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane); 2065 unsigned int cpp = fb->format->cpp[color_plane]; 2066 2067 *tile_width = tile_width_bytes / cpp; 2068 *tile_height = intel_tile_height(fb, color_plane); 2069 } 2070 2071 unsigned int 2072 intel_fb_align_height(const struct drm_framebuffer *fb, 2073 int color_plane, unsigned int height) 2074 { 2075 unsigned int tile_height = intel_tile_height(fb, color_plane); 2076 2077 return ALIGN(height, tile_height); 2078 } 2079 2080 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 2081 { 2082 unsigned int size = 0; 2083 int i; 2084 2085 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 2086 size += rot_info->plane[i].width * rot_info->plane[i].height; 2087 2088 return size; 2089 } 2090 2091 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info) 2092 { 2093 unsigned int size = 0; 2094 int i; 2095 2096 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) 2097 size += rem_info->plane[i].width * rem_info->plane[i].height; 2098 2099 return size; 2100 } 2101 2102 static void 2103 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, 2104 const struct drm_framebuffer *fb, 2105 unsigned int rotation) 2106 { 2107 view->type = I915_GGTT_VIEW_NORMAL; 2108 if (drm_rotation_90_or_270(rotation)) { 2109 view->type = I915_GGTT_VIEW_ROTATED; 2110 view->rotated = to_intel_framebuffer(fb)->rot_info; 2111 } 2112 } 2113 2114 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv) 2115 { 2116 if (IS_I830(dev_priv)) 2117 return 16 * 1024; 2118 else if (IS_I85X(dev_priv)) 2119 return 256; 2120 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 2121 return 32; 2122 else 2123 return 4 * 1024; 2124 } 2125 2126 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2127 { 2128 if (INTEL_GEN(dev_priv) >= 9) 2129 return 256 * 1024; 2130 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || 2131 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2132 return 128 * 1024; 2133 else if (INTEL_GEN(dev_priv) >= 4) 2134 return 4 * 1024; 2135 else 2136 return 0; 2137 } 2138 2139 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, 2140 int color_plane) 2141 { 2142 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2143 2144 /* AUX_DIST needs only 4K alignment */ 2145 if (is_aux_plane(fb, color_plane)) 2146 return 4096; 2147 2148 switch (fb->modifier) { 2149 case DRM_FORMAT_MOD_LINEAR: 2150 return intel_linear_alignment(dev_priv); 2151 case I915_FORMAT_MOD_X_TILED: 2152 if (INTEL_GEN(dev_priv) >= 9) 2153 return 256 * 1024; 2154 return 0; 2155 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2156 return 16 * 1024; 2157 case I915_FORMAT_MOD_Y_TILED_CCS: 2158 case I915_FORMAT_MOD_Yf_TILED_CCS: 2159 case I915_FORMAT_MOD_Y_TILED: 2160 case I915_FORMAT_MOD_Yf_TILED: 2161 return 1 * 1024 * 1024; 2162 default: 2163 MISSING_CASE(fb->modifier); 2164 return 0; 2165 } 2166 } 2167 2168 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) 2169 { 2170 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2171 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2172 2173 return INTEL_GEN(dev_priv) < 4 || 2174 (plane->has_fbc && 2175 plane_state->view.type == I915_GGTT_VIEW_NORMAL); 2176 } 2177 2178 struct i915_vma * 2179 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 2180 const struct i915_ggtt_view *view, 2181 bool uses_fence, 2182 unsigned long *out_flags) 2183 { 2184 struct 
drm_device *dev = fb->dev; 2185 struct drm_i915_private *dev_priv = to_i915(dev); 2186 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2187 intel_wakeref_t wakeref; 2188 struct i915_vma *vma; 2189 unsigned int pinctl; 2190 u32 alignment; 2191 2192 if (WARN_ON(!i915_gem_object_is_framebuffer(obj))) 2193 return ERR_PTR(-EINVAL); 2194 2195 alignment = intel_surf_alignment(fb, 0); 2196 2197 /* Note that the w/a also requires 64 PTE of padding following the 2198 * bo. We currently fill all unused PTE with the shadow page and so 2199 * we should always have valid PTE following the scanout preventing 2200 * the VT-d warning. 2201 */ 2202 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) 2203 alignment = 256 * 1024; 2204 2205 /* 2206 * Global gtt pte registers are special registers which actually forward 2207 * writes to a chunk of system memory, which means that there is no risk 2208 * that the register values disappear as soon as we call 2209 * intel_runtime_pm_put(), so it is correct to wrap only the 2210 * pin/unpin/fence and not more. 2211 */ 2212 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2213 2214 atomic_inc(&dev_priv->gpu_error.pending_fb_pin); 2215 2216 /* 2217 * Valleyview is definitely limited to scanning out the first 2218 * 512MiB. Let's presume this behaviour was inherited from the 2219 * g4x display engine and that all earlier gen are similarly 2220 * limited. Testing suggests that it is a little more 2221 * complicated than this. For example, Cherryview appears quite 2222 * happy to scan out from anywhere within its global aperture. 2223 */ 2224 pinctl = 0; 2225 if (HAS_GMCH(dev_priv)) 2226 pinctl |= PIN_MAPPABLE; 2227 2228 vma = i915_gem_object_pin_to_display_plane(obj, 2229 alignment, view, pinctl); 2230 if (IS_ERR(vma)) 2231 goto err; 2232 2233 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { 2234 int ret; 2235 2236 /* 2237 * Install a fence for tiled scan-out. Pre-i965 always needs a 2238 * fence, whereas 965+ only requires a fence if using 2239 * framebuffer compression. For simplicity, we always, when 2240 * possible, install a fence as the cost is not that onerous. 2241 * 2242 * If we fail to fence the tiled scanout, then either the 2243 * modeset will reject the change (which is highly unlikely as 2244 * the affected systems, all but one, do not have unmappable 2245 * space) or we will not be able to enable full powersaving 2246 * techniques (also likely not to apply due to various limits 2247 * FBC and the like impose on the size of the buffer, which 2248 * presumably we violated anyway with this unmappable buffer). 2249 * Anyway, it is presumably better to stumble onwards with 2250 * something and try to run the system in a "less than optimal" 2251 * mode that matches the user configuration.
2252 */ 2253 ret = i915_vma_pin_fence(vma); 2254 if (ret != 0 && INTEL_GEN(dev_priv) < 4) { 2255 i915_gem_object_unpin_from_display_plane(vma); 2256 vma = ERR_PTR(ret); 2257 goto err; 2258 } 2259 2260 if (ret == 0 && vma->fence) 2261 *out_flags |= PLANE_HAS_FENCE; 2262 } 2263 2264 i915_vma_get(vma); 2265 err: 2266 atomic_dec(&dev_priv->gpu_error.pending_fb_pin); 2267 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2268 return vma; 2269 } 2270 2271 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) 2272 { 2273 i915_gem_object_lock(vma->obj); 2274 if (flags & PLANE_HAS_FENCE) 2275 i915_vma_unpin_fence(vma); 2276 i915_gem_object_unpin_from_display_plane(vma); 2277 i915_gem_object_unlock(vma->obj); 2278 2279 i915_vma_put(vma); 2280 } 2281 2282 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane, 2283 unsigned int rotation) 2284 { 2285 if (drm_rotation_90_or_270(rotation)) 2286 return to_intel_framebuffer(fb)->rotated[color_plane].pitch; 2287 else 2288 return fb->pitches[color_plane]; 2289 } 2290 2291 /* 2292 * Convert the x/y offsets into a linear offset. 2293 * Only valid with 0/180 degree rotation, which is fine since linear 2294 * offset is only used with linear buffers on pre-hsw and tiled buffers 2295 * with gen2/3, and 90/270 degree rotation isn't supported on any of them. 2296 */ 2297 u32 intel_fb_xy_to_linear(int x, int y, 2298 const struct intel_plane_state *state, 2299 int color_plane) 2300 { 2301 const struct drm_framebuffer *fb = state->hw.fb; 2302 unsigned int cpp = fb->format->cpp[color_plane]; 2303 unsigned int pitch = state->color_plane[color_plane].stride; 2304 2305 return y * pitch + x * cpp; 2306 } 2307 2308 /* 2309 * Add the x/y offsets derived from fb->offsets[] to the user 2310 * specified plane src x/y offsets. The resulting x/y offsets 2311 * specify the start of scanout from the beginning of the gtt mapping.
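 * E.g. (illustrative values) with color_plane[0].x/y = (8, 2) a user
 * offset of (16, 4) becomes (24, 6), which intel_fb_xy_to_linear()
 * above turns into 6 * pitch + 24 * cpp bytes.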
2312 */ 2313 void intel_add_fb_offsets(int *x, int *y, 2314 const struct intel_plane_state *state, 2315 int color_plane) 2316 2317 { 2318 *x += state->color_plane[color_plane].x; 2319 *y += state->color_plane[color_plane].y; 2320 } 2321 2322 static u32 intel_adjust_tile_offset(int *x, int *y, 2323 unsigned int tile_width, 2324 unsigned int tile_height, 2325 unsigned int tile_size, 2326 unsigned int pitch_tiles, 2327 u32 old_offset, 2328 u32 new_offset) 2329 { 2330 unsigned int pitch_pixels = pitch_tiles * tile_width; 2331 unsigned int tiles; 2332 2333 WARN_ON(old_offset & (tile_size - 1)); 2334 WARN_ON(new_offset & (tile_size - 1)); 2335 WARN_ON(new_offset > old_offset); 2336 2337 tiles = (old_offset - new_offset) / tile_size; 2338 2339 *y += tiles / pitch_tiles * tile_height; 2340 *x += tiles % pitch_tiles * tile_width; 2341 2342 /* minimize x in case it got needlessly big */ 2343 *y += *x / pitch_pixels * tile_height; 2344 *x %= pitch_pixels; 2345 2346 return new_offset; 2347 } 2348 2349 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane) 2350 { 2351 return fb->modifier == DRM_FORMAT_MOD_LINEAR || 2352 is_gen12_ccs_plane(fb, color_plane); 2353 } 2354 2355 static u32 intel_adjust_aligned_offset(int *x, int *y, 2356 const struct drm_framebuffer *fb, 2357 int color_plane, 2358 unsigned int rotation, 2359 unsigned int pitch, 2360 u32 old_offset, u32 new_offset) 2361 { 2362 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2363 unsigned int cpp = fb->format->cpp[color_plane]; 2364 2365 WARN_ON(new_offset > old_offset); 2366 2367 if (!is_surface_linear(fb, color_plane)) { 2368 unsigned int tile_size, tile_width, tile_height; 2369 unsigned int pitch_tiles; 2370 2371 tile_size = intel_tile_size(dev_priv); 2372 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2373 2374 if (drm_rotation_90_or_270(rotation)) { 2375 pitch_tiles = pitch / tile_height; 2376 swap(tile_width, tile_height); 2377 } else { 2378 pitch_tiles = pitch / (tile_width * cpp); 2379 } 2380 2381 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2382 tile_size, pitch_tiles, 2383 old_offset, new_offset); 2384 } else { 2385 old_offset += *y * pitch + *x * cpp; 2386 2387 *y = (old_offset - new_offset) / pitch; 2388 *x = ((old_offset - new_offset) - *y * pitch) / cpp; 2389 } 2390 2391 return new_offset; 2392 } 2393 2394 /* 2395 * Adjust the tile offset by moving the difference into 2396 * the x/y offsets. 2397 */ 2398 static u32 intel_plane_adjust_aligned_offset(int *x, int *y, 2399 const struct intel_plane_state *state, 2400 int color_plane, 2401 u32 old_offset, u32 new_offset) 2402 { 2403 return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane, 2404 state->hw.rotation, 2405 state->color_plane[color_plane].stride, 2406 old_offset, new_offset); 2407 } 2408 2409 /* 2410 * Computes the aligned offset to the base tile and adjusts 2411 * x, y. Bytes per pixel is assumed to be a power of two. 2412 * 2413 * In the 90/270 rotated case, x and y are assumed 2414 * to be already rotated to match the rotated GTT view, and 2415 * pitch is the tile_height aligned framebuffer height. 2416 * 2417 * This function is used when computing the derived information 2418 * under intel_framebuffer, so using any of that information 2419 * here is not allowed. Anything under drm_framebuffer can be 2420 * used. This is why the user has to pass in the pitch since it 2421 * is specified in the rotated orientation.
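 *
 * Worked example (illustrative): an X-tiled gen3+ tile is 512 bytes x
 * 8 rows, i.e. 128x8 pixels at cpp == 4. With a pitch of 8192 bytes
 * (pitch_tiles = 16), a 4 KiB alignment and (x, y) = (300, 20), we get
 * tile_rows = 2 and tiles = 2, so the function returns
 * (2 * 16 + 2) * 4096 = 139264 and leaves the residual
 * (x, y) = (44, 4) inside the base tile.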
2422 */ 2423 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, 2424 int *x, int *y, 2425 const struct drm_framebuffer *fb, 2426 int color_plane, 2427 unsigned int pitch, 2428 unsigned int rotation, 2429 u32 alignment) 2430 { 2431 unsigned int cpp = fb->format->cpp[color_plane]; 2432 u32 offset, offset_aligned; 2433 2434 if (alignment) 2435 alignment--; 2436 2437 if (!is_surface_linear(fb, color_plane)) { 2438 unsigned int tile_size, tile_width, tile_height; 2439 unsigned int tile_rows, tiles, pitch_tiles; 2440 2441 tile_size = intel_tile_size(dev_priv); 2442 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2443 2444 if (drm_rotation_90_or_270(rotation)) { 2445 pitch_tiles = pitch / tile_height; 2446 swap(tile_width, tile_height); 2447 } else { 2448 pitch_tiles = pitch / (tile_width * cpp); 2449 } 2450 2451 tile_rows = *y / tile_height; 2452 *y %= tile_height; 2453 2454 tiles = *x / tile_width; 2455 *x %= tile_width; 2456 2457 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2458 offset_aligned = offset & ~alignment; 2459 2460 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2461 tile_size, pitch_tiles, 2462 offset, offset_aligned); 2463 } else { 2464 offset = *y * pitch + *x * cpp; 2465 offset_aligned = offset & ~alignment; 2466 2467 *y = (offset & alignment) / pitch; 2468 *x = ((offset & alignment) - *y * pitch) / cpp; 2469 } 2470 2471 return offset_aligned; 2472 } 2473 2474 static u32 intel_plane_compute_aligned_offset(int *x, int *y, 2475 const struct intel_plane_state *state, 2476 int color_plane) 2477 { 2478 struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane); 2479 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 2480 const struct drm_framebuffer *fb = state->hw.fb; 2481 unsigned int rotation = state->hw.rotation; 2482 int pitch = state->color_plane[color_plane].stride; 2483 u32 alignment; 2484 2485 if (intel_plane->id == PLANE_CURSOR) 2486 alignment = intel_cursor_alignment(dev_priv); 2487 else 2488 alignment = intel_surf_alignment(fb, color_plane); 2489 2490 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane, 2491 pitch, rotation, alignment); 2492 } 2493 2494 /* Convert the fb->offset[] into x/y offsets */ 2495 static int intel_fb_offset_to_xy(int *x, int *y, 2496 const struct drm_framebuffer *fb, 2497 int color_plane) 2498 { 2499 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2500 unsigned int height; 2501 2502 if (fb->modifier != DRM_FORMAT_MOD_LINEAR && 2503 fb->offsets[color_plane] % intel_tile_size(dev_priv)) { 2504 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n", 2505 fb->offsets[color_plane], color_plane); 2506 return -EINVAL; 2507 } 2508 2509 height = drm_framebuffer_plane_height(fb->height, fb, color_plane); 2510 height = ALIGN(height, intel_tile_height(fb, color_plane)); 2511 2512 /* Catch potential overflows early */ 2513 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]), 2514 fb->offsets[color_plane])) { 2515 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n", 2516 fb->offsets[color_plane], fb->pitches[color_plane], 2517 color_plane); 2518 return -ERANGE; 2519 } 2520 2521 *x = 0; 2522 *y = 0; 2523 2524 intel_adjust_aligned_offset(x, y, 2525 fb, color_plane, DRM_MODE_ROTATE_0, 2526 fb->pitches[color_plane], 2527 fb->offsets[color_plane], 0); 2528 2529 return 0; 2530 } 2531 2532 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) 2533 { 2534 switch (fb_modifier) { 2535 case I915_FORMAT_MOD_X_TILED: 
2536 return I915_TILING_X; 2537 case I915_FORMAT_MOD_Y_TILED: 2538 case I915_FORMAT_MOD_Y_TILED_CCS: 2539 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2540 return I915_TILING_Y; 2541 default: 2542 return I915_TILING_NONE; 2543 } 2544 } 2545 2546 /* 2547 * From the Sky Lake PRM: 2548 * "The Color Control Surface (CCS) contains the compression status of 2549 * the cache-line pairs. The compression state of the cache-line pair 2550 * is specified by 2 bits in the CCS. Each CCS cache-line represents 2551 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled 2552 * cache-line-pairs. CCS is always Y tiled." 2553 * 2554 * Since a cache-line pair refers to two horizontally adjacent cache lines, 2555 * each cache line in the CCS corresponds to an area of 32x16 cache 2556 * lines on the main surface. Since each pixel is 4 bytes, this gives 2557 * us a ratio of one byte in the CCS for each 8x16 pixels in the 2558 * main surface. 2559 */ 2560 static const struct drm_format_info skl_ccs_formats[] = { 2561 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 2562 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2563 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 2564 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2565 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 2566 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2567 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 2568 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2569 }; 2570 2571 /* 2572 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the 2573 * main surface. Each 64B CCS cache line represents an area of 4x1 Y-tiles 2574 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of 2575 * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in 2576 * the main surface.
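 * (Check: 4x1 Y-tiles = 128x32 pixels = 16384 bytes at 4 bytes per
 * pixel, covered by one 64 B CCS cache line, i.e. a 256:1 ratio,
 * matching 1 B per 2x32 pixel block: 2 * 32 * 4 = 256 bytes.)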
2577 */ 2578 static const struct drm_format_info gen12_ccs_formats[] = { 2579 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 2580 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2581 .hsub = 1, .vsub = 1, }, 2582 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 2583 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2584 .hsub = 1, .vsub = 1, }, 2585 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 2586 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2587 .hsub = 1, .vsub = 1, .has_alpha = true }, 2588 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 2589 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2590 .hsub = 1, .vsub = 1, .has_alpha = true }, 2591 }; 2592 2593 static const struct drm_format_info * 2594 lookup_format_info(const struct drm_format_info formats[], 2595 int num_formats, u32 format) 2596 { 2597 int i; 2598 2599 for (i = 0; i < num_formats; i++) { 2600 if (formats[i].format == format) 2601 return &formats[i]; 2602 } 2603 2604 return NULL; 2605 } 2606 2607 static const struct drm_format_info * 2608 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 2609 { 2610 switch (cmd->modifier[0]) { 2611 case I915_FORMAT_MOD_Y_TILED_CCS: 2612 case I915_FORMAT_MOD_Yf_TILED_CCS: 2613 return lookup_format_info(skl_ccs_formats, 2614 ARRAY_SIZE(skl_ccs_formats), 2615 cmd->pixel_format); 2616 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2617 return lookup_format_info(gen12_ccs_formats, 2618 ARRAY_SIZE(gen12_ccs_formats), 2619 cmd->pixel_format); 2620 default: 2621 return NULL; 2622 } 2623 } 2624 2625 bool is_ccs_modifier(u64 modifier) 2626 { 2627 return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || 2628 modifier == I915_FORMAT_MOD_Y_TILED_CCS || 2629 modifier == I915_FORMAT_MOD_Yf_TILED_CCS; 2630 } 2631 2632 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane) 2633 { 2634 return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)], 2635 512) * 64; 2636 } 2637 2638 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, 2639 u32 pixel_format, u64 modifier) 2640 { 2641 struct intel_crtc *crtc; 2642 struct intel_plane *plane; 2643 2644 /* 2645 * We assume the primary plane for pipe A has 2646 * the highest stride limits of them all. 2647 */ 2648 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); 2649 if (!crtc) 2650 return 0; 2651 2652 plane = to_intel_plane(crtc->base.primary); 2653 2654 return plane->max_stride(plane, pixel_format, modifier, 2655 DRM_MODE_ROTATE_0); 2656 } 2657 2658 static 2659 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, 2660 u32 pixel_format, u64 modifier) 2661 { 2662 /* 2663 * Arbitrary limit for gen4+ chosen to match the 2664 * render engine max stride. 
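 * (256 KiB on gen7+, 128 KiB on gen4-6, matching the checks below.)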
2665 * 2666 * The new CCS hash mode makes remapping impossible 2667 */ 2668 if (!is_ccs_modifier(modifier)) { 2669 if (INTEL_GEN(dev_priv) >= 7) 2670 return 256*1024; 2671 else if (INTEL_GEN(dev_priv) >= 4) 2672 return 128*1024; 2673 } 2674 2675 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); 2676 } 2677 2678 static u32 2679 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) 2680 { 2681 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2682 u32 tile_width; 2683 2684 if (is_surface_linear(fb, color_plane)) { 2685 u32 max_stride = intel_plane_fb_max_stride(dev_priv, 2686 fb->format->format, 2687 fb->modifier); 2688 2689 /* 2690 * To make remapping with linear generally feasible 2691 * we need the stride to be page aligned. 2692 */ 2693 if (fb->pitches[color_plane] > max_stride && 2694 !is_ccs_modifier(fb->modifier)) 2695 return intel_tile_size(dev_priv); 2696 else 2697 return 64; 2698 } 2699 2700 tile_width = intel_tile_width_bytes(fb, color_plane); 2701 if (is_ccs_modifier(fb->modifier) && color_plane == 0) { 2702 /* 2703 * Display WA #0531: skl,bxt,kbl,glk 2704 * 2705 * Render decompression and plane width > 3840 2706 * combined with horizontal panning requires the 2707 * plane stride to be a multiple of 4. We'll just 2708 * require the entire fb to accommodate that to avoid 2709 * potential runtime errors at plane configuration time. 2710 */ 2711 if (IS_GEN(dev_priv, 9) && fb->width > 3840) 2712 tile_width *= 4; 2713 /* 2714 * The main surface pitch must be padded to a multiple of four 2715 * tile widths. 2716 */ 2717 else if (INTEL_GEN(dev_priv) >= 12) 2718 tile_width *= 4; 2719 } 2720 return tile_width; 2721 } 2722 2723 bool intel_plane_can_remap(const struct intel_plane_state *plane_state) 2724 { 2725 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2726 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2727 const struct drm_framebuffer *fb = plane_state->hw.fb; 2728 int i; 2729 2730 /* We don't want to deal with remapping with cursors */ 2731 if (plane->id == PLANE_CURSOR) 2732 return false; 2733 2734 /* 2735 * The display engine limits already match/exceed the 2736 * render engine limits, so not much point in remapping. 2737 * Would also need to deal with the fence POT alignment 2738 * and gen2 2KiB GTT tile size. 2739 */ 2740 if (INTEL_GEN(dev_priv) < 4) 2741 return false; 2742 2743 /* 2744 * The new CCS hash mode isn't compatible with remapping as 2745 * the virtual address of the pages affects the compressed data. 2746 */ 2747 if (is_ccs_modifier(fb->modifier)) 2748 return false; 2749 2750 /* Linear needs a page aligned stride for remapping */ 2751 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { 2752 unsigned int alignment = intel_tile_size(dev_priv) - 1; 2753 2754 for (i = 0; i < fb->format->num_planes; i++) { 2755 if (fb->pitches[i] & alignment) 2756 return false; 2757 } 2758 } 2759 2760 return true; 2761 } 2762 2763 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) 2764 { 2765 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2766 const struct drm_framebuffer *fb = plane_state->hw.fb; 2767 unsigned int rotation = plane_state->hw.rotation; 2768 u32 stride, max_stride; 2769 2770 /* 2771 * No remapping for invisible planes since we don't have 2772 * an actual source viewport to remap. 
2773 */ 2774 if (!plane_state->uapi.visible) 2775 return false; 2776 2777 if (!intel_plane_can_remap(plane_state)) 2778 return false; 2779 2780 /* 2781 * FIXME: aux plane limits on gen9+ are 2782 * unclear in Bspec, for now no checking. 2783 */ 2784 stride = intel_fb_pitch(fb, 0, rotation); 2785 max_stride = plane->max_stride(plane, fb->format->format, 2786 fb->modifier, rotation); 2787 2788 return stride > max_stride; 2789 } 2790 2791 static void 2792 intel_fb_plane_get_subsampling(int *hsub, int *vsub, 2793 const struct drm_framebuffer *fb, 2794 int color_plane) 2795 { 2796 int main_plane; 2797 2798 if (color_plane == 0) { 2799 *hsub = 1; 2800 *vsub = 1; 2801 2802 return; 2803 } 2804 2805 /* 2806 * TODO: Deduce the subsampling from the char block for all CCS 2807 * formats and planes. 2808 */ 2809 if (!is_gen12_ccs_plane(fb, color_plane)) { 2810 *hsub = fb->format->hsub; 2811 *vsub = fb->format->vsub; 2812 2813 return; 2814 } 2815 2816 main_plane = ccs_to_main_plane(fb, color_plane); 2817 *hsub = drm_format_info_block_width(fb->format, color_plane) / 2818 drm_format_info_block_width(fb->format, main_plane); 2819 2820 /* 2821 * The min stride check in the core framebuffer_check() function 2822 * assumes that format->hsub applies to every plane except for the 2823 * first plane. That's incorrect for the CCS AUX plane of the first 2824 * plane, but for the above check to pass we must define the block 2825 * width with that subsampling applied to it. Adjust the width here 2826 * accordingly, so we can calculate the actual subsampling factor. 2827 */ 2828 if (main_plane == 0) 2829 *hsub *= fb->format->hsub; 2830 2831 *vsub = 32; 2832 } 2833 static int 2834 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y) 2835 { 2836 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2837 int main_plane; 2838 int hsub, vsub; 2839 int tile_width, tile_height; 2840 int ccs_x, ccs_y; 2841 int main_x, main_y; 2842 2843 if (!is_ccs_plane(fb, ccs_plane)) 2844 return 0; 2845 2846 intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height); 2847 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); 2848 2849 tile_width *= hsub; 2850 tile_height *= vsub; 2851 2852 ccs_x = (x * hsub) % tile_width; 2853 ccs_y = (y * vsub) % tile_height; 2854 2855 main_plane = ccs_to_main_plane(fb, ccs_plane); 2856 main_x = intel_fb->normal[main_plane].x % tile_width; 2857 main_y = intel_fb->normal[main_plane].y % tile_height; 2858 2859 /* 2860 * CCS doesn't have its own x/y offset register, so the intra CCS tile 2861 * x/y offsets must match between CCS and the main surface. 2862 */ 2863 if (main_x != ccs_x || main_y != ccs_y) { 2864 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", 2865 main_x, main_y, 2866 ccs_x, ccs_y, 2867 intel_fb->normal[main_plane].x, 2868 intel_fb->normal[main_plane].y, 2869 x, y); 2870 return -EINVAL; 2871 } 2872 2873 return 0; 2874 } 2875 2876 static void 2877 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane) 2878 { 2879 int hsub, vsub; 2880 2881 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane); 2882 *w = fb->width / hsub; 2883 *h = fb->height / vsub; 2884 } 2885 2886 /* 2887 * Set up the rotated view for an FB plane and return the size the GTT mapping 2888 * requires for this view.
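 * The size is returned in tile units; the caller accumulates it into
 * gtt_offset_rotated to place consecutive planes in the rotated mapping.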
2889 */ 2890 static u32 2891 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info, 2892 u32 gtt_offset_rotated, int x, int y, 2893 unsigned int width, unsigned int height, 2894 unsigned int tile_size, 2895 unsigned int tile_width, unsigned int tile_height, 2896 struct drm_framebuffer *fb) 2897 { 2898 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2899 struct intel_rotation_info *rot_info = &intel_fb->rot_info; 2900 unsigned int pitch_tiles; 2901 struct drm_rect r; 2902 2903 /* Y or Yf modifiers required for 90/270 rotation */ 2904 if (fb->modifier != I915_FORMAT_MOD_Y_TILED && 2905 fb->modifier != I915_FORMAT_MOD_Yf_TILED) 2906 return 0; 2907 2908 if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane))) 2909 return 0; 2910 2911 rot_info->plane[plane] = *plane_info; 2912 2913 intel_fb->rotated[plane].pitch = plane_info->height * tile_height; 2914 2915 /* rotate the x/y offsets to match the GTT view */ 2916 drm_rect_init(&r, x, y, width, height); 2917 drm_rect_rotate(&r, 2918 plane_info->width * tile_width, 2919 plane_info->height * tile_height, 2920 DRM_MODE_ROTATE_270); 2921 x = r.x1; 2922 y = r.y1; 2923 2924 /* rotate the tile dimensions to match the GTT view */ 2925 pitch_tiles = intel_fb->rotated[plane].pitch / tile_height; 2926 swap(tile_width, tile_height); 2927 2928 /* 2929 * We only keep the x/y offsets, so push all of the 2930 * gtt offset into the x/y offsets. 2931 */ 2932 intel_adjust_tile_offset(&x, &y, 2933 tile_width, tile_height, 2934 tile_size, pitch_tiles, 2935 gtt_offset_rotated * tile_size, 0); 2936 2937 /* 2938 * First pixel of the framebuffer from 2939 * the start of the rotated gtt mapping. 2940 */ 2941 intel_fb->rotated[plane].x = x; 2942 intel_fb->rotated[plane].y = y; 2943 2944 return plane_info->width * plane_info->height; 2945 } 2946 2947 static int 2948 intel_fill_fb_info(struct drm_i915_private *dev_priv, 2949 struct drm_framebuffer *fb) 2950 { 2951 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2952 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2953 u32 gtt_offset_rotated = 0; 2954 unsigned int max_size = 0; 2955 int i, num_planes = fb->format->num_planes; 2956 unsigned int tile_size = intel_tile_size(dev_priv); 2957 2958 for (i = 0; i < num_planes; i++) { 2959 unsigned int width, height; 2960 unsigned int cpp, size; 2961 u32 offset; 2962 int x, y; 2963 int ret; 2964 2965 cpp = fb->format->cpp[i]; 2966 intel_fb_plane_dims(&width, &height, fb, i); 2967 2968 ret = intel_fb_offset_to_xy(&x, &y, fb, i); 2969 if (ret) { 2970 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2971 i, fb->offsets[i]); 2972 return ret; 2973 } 2974 2975 ret = intel_fb_check_ccs_xy(fb, i, x, y); 2976 if (ret) 2977 return ret; 2978 2979 /* 2980 * The fence (if used) is aligned to the start of the object 2981 * so having the framebuffer wrap around across the edge of the 2982 * fenced region doesn't really work. We have no API to configure 2983 * the fence start offset within the object (nor could we probably 2984 * on gen2/3). So it's just easier if we just require that the 2985 * fb layout agrees with the fence layout. We already check that the 2986 * fb stride matches the fence stride elsewhere. 2987 */ 2988 if (i == 0 && i915_gem_object_is_tiled(obj) && 2989 (x + width) * cpp > fb->pitches[i]) { 2990 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2991 i, fb->offsets[i]); 2992 return -EINVAL; 2993 } 2994 2995 /* 2996 * First pixel of the framebuffer from 2997 * the start of the normal gtt mapping. 
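 * (intel_plane_remap_gtt() below builds the remapped/rotated views
 * on top of these normal view offsets.)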
2998 */ 2999 intel_fb->normal[i].x = x; 3000 intel_fb->normal[i].y = y; 3001 3002 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i, 3003 fb->pitches[i], 3004 DRM_MODE_ROTATE_0, 3005 tile_size); 3006 offset /= tile_size; 3007 3008 if (!is_surface_linear(fb, i)) { 3009 struct intel_remapped_plane_info plane_info; 3010 unsigned int tile_width, tile_height; 3011 3012 intel_tile_dims(fb, i, &tile_width, &tile_height); 3013 3014 plane_info.offset = offset; 3015 plane_info.stride = DIV_ROUND_UP(fb->pitches[i], 3016 tile_width * cpp); 3017 plane_info.width = DIV_ROUND_UP(x + width, tile_width); 3018 plane_info.height = DIV_ROUND_UP(y + height, 3019 tile_height); 3020 3021 /* how many tiles does this plane need */ 3022 size = plane_info.stride * plane_info.height; 3023 /* 3024 * If the plane isn't horizontally tile aligned, 3025 * we need one more tile. 3026 */ 3027 if (x != 0) 3028 size++; 3029 3030 gtt_offset_rotated += 3031 setup_fb_rotation(i, &plane_info, 3032 gtt_offset_rotated, 3033 x, y, width, height, 3034 tile_size, 3035 tile_width, tile_height, 3036 fb); 3037 } else { 3038 size = DIV_ROUND_UP((y + height) * fb->pitches[i] + 3039 x * cpp, tile_size); 3040 } 3041 3042 /* how many tiles in total needed in the bo */ 3043 max_size = max(max_size, offset + size); 3044 } 3045 3046 if (mul_u32_u32(max_size, tile_size) > obj->base.size) { 3047 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n", 3048 mul_u32_u32(max_size, tile_size), obj->base.size); 3049 return -EINVAL; 3050 } 3051 3052 return 0; 3053 } 3054 3055 static void 3056 intel_plane_remap_gtt(struct intel_plane_state *plane_state) 3057 { 3058 struct drm_i915_private *dev_priv = 3059 to_i915(plane_state->uapi.plane->dev); 3060 struct drm_framebuffer *fb = plane_state->hw.fb; 3061 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 3062 struct intel_rotation_info *info = &plane_state->view.rotated; 3063 unsigned int rotation = plane_state->hw.rotation; 3064 int i, num_planes = fb->format->num_planes; 3065 unsigned int tile_size = intel_tile_size(dev_priv); 3066 unsigned int src_x, src_y; 3067 unsigned int src_w, src_h; 3068 u32 gtt_offset = 0; 3069 3070 memset(&plane_state->view, 0, sizeof(plane_state->view)); 3071 plane_state->view.type = drm_rotation_90_or_270(rotation) ? 3072 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED; 3073 3074 src_x = plane_state->uapi.src.x1 >> 16; 3075 src_y = plane_state->uapi.src.y1 >> 16; 3076 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 3077 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 3078 3079 WARN_ON(is_ccs_modifier(fb->modifier)); 3080 3081 /* Make src coordinates relative to the viewport */ 3082 drm_rect_translate(&plane_state->uapi.src, 3083 -(src_x << 16), -(src_y << 16)); 3084 3085 /* Rotate src coordinates to match rotated GTT view */ 3086 if (drm_rotation_90_or_270(rotation)) 3087 drm_rect_rotate(&plane_state->uapi.src, 3088 src_w << 16, src_h << 16, 3089 DRM_MODE_ROTATE_270); 3090 3091 for (i = 0; i < num_planes; i++) { 3092 unsigned int hsub = i ? fb->format->hsub : 1; 3093 unsigned int vsub = i ? 
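/* only chroma planes (i > 0) are subsampled */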
fb->format->vsub : 1; 3094 unsigned int cpp = fb->format->cpp[i]; 3095 unsigned int tile_width, tile_height; 3096 unsigned int width, height; 3097 unsigned int pitch_tiles; 3098 unsigned int x, y; 3099 u32 offset; 3100 3101 intel_tile_dims(fb, i, &tile_width, &tile_height); 3102 3103 x = src_x / hsub; 3104 y = src_y / vsub; 3105 width = src_w / hsub; 3106 height = src_h / vsub; 3107 3108 /* 3109 * First pixel of the src viewport from the 3110 * start of the normal gtt mapping. 3111 */ 3112 x += intel_fb->normal[i].x; 3113 y += intel_fb->normal[i].y; 3114 3115 offset = intel_compute_aligned_offset(dev_priv, &x, &y, 3116 fb, i, fb->pitches[i], 3117 DRM_MODE_ROTATE_0, tile_size); 3118 offset /= tile_size; 3119 3120 WARN_ON(i >= ARRAY_SIZE(info->plane)); 3121 info->plane[i].offset = offset; 3122 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], 3123 tile_width * cpp); 3124 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 3125 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 3126 3127 if (drm_rotation_90_or_270(rotation)) { 3128 struct drm_rect r; 3129 3130 /* rotate the x/y offsets to match the GTT view */ 3131 drm_rect_init(&r, x, y, width, height); 3132 drm_rect_rotate(&r, 3133 info->plane[i].width * tile_width, 3134 info->plane[i].height * tile_height, 3135 DRM_MODE_ROTATE_270); 3136 x = r.x1; 3137 y = r.y1; 3138 3139 pitch_tiles = info->plane[i].height; 3140 plane_state->color_plane[i].stride = pitch_tiles * tile_height; 3141 3142 /* rotate the tile dimensions to match the GTT view */ 3143 swap(tile_width, tile_height); 3144 } else { 3145 pitch_tiles = info->plane[i].width; 3146 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp; 3147 } 3148 3149 /* 3150 * We only keep the x/y offsets, so push all of the 3151 * gtt offset into the x/y offsets. 3152 */ 3153 intel_adjust_tile_offset(&x, &y, 3154 tile_width, tile_height, 3155 tile_size, pitch_tiles, 3156 gtt_offset * tile_size, 0); 3157 3158 gtt_offset += info->plane[i].width * info->plane[i].height; 3159 3160 plane_state->color_plane[i].offset = 0; 3161 plane_state->color_plane[i].x = x; 3162 plane_state->color_plane[i].y = y; 3163 } 3164 } 3165 3166 static int 3167 intel_plane_compute_gtt(struct intel_plane_state *plane_state) 3168 { 3169 const struct intel_framebuffer *fb = 3170 to_intel_framebuffer(plane_state->hw.fb); 3171 unsigned int rotation = plane_state->hw.rotation; 3172 int i, num_planes; 3173 3174 if (!fb) 3175 return 0; 3176 3177 num_planes = fb->base.format->num_planes; 3178 3179 if (intel_plane_needs_remap(plane_state)) { 3180 intel_plane_remap_gtt(plane_state); 3181 3182 /* 3183 * Sometimes even remapping can't overcome 3184 * the stride limitations :( Can happen with 3185 * big plane sizes and suitably misaligned 3186 * offsets. 
3187 */ 3188 return intel_plane_check_stride(plane_state); 3189 } 3190 3191 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation); 3192 3193 for (i = 0; i < num_planes; i++) { 3194 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation); 3195 plane_state->color_plane[i].offset = 0; 3196 3197 if (drm_rotation_90_or_270(rotation)) { 3198 plane_state->color_plane[i].x = fb->rotated[i].x; 3199 plane_state->color_plane[i].y = fb->rotated[i].y; 3200 } else { 3201 plane_state->color_plane[i].x = fb->normal[i].x; 3202 plane_state->color_plane[i].y = fb->normal[i].y; 3203 } 3204 } 3205 3206 /* Rotate src coordinates to match rotated GTT view */ 3207 if (drm_rotation_90_or_270(rotation)) 3208 drm_rect_rotate(&plane_state->uapi.src, 3209 fb->base.width << 16, fb->base.height << 16, 3210 DRM_MODE_ROTATE_270); 3211 3212 return intel_plane_check_stride(plane_state); 3213 } 3214 3215 static int i9xx_format_to_fourcc(int format) 3216 { 3217 switch (format) { 3218 case DISPPLANE_8BPP: 3219 return DRM_FORMAT_C8; 3220 case DISPPLANE_BGRA555: 3221 return DRM_FORMAT_ARGB1555; 3222 case DISPPLANE_BGRX555: 3223 return DRM_FORMAT_XRGB1555; 3224 case DISPPLANE_BGRX565: 3225 return DRM_FORMAT_RGB565; 3226 default: 3227 case DISPPLANE_BGRX888: 3228 return DRM_FORMAT_XRGB8888; 3229 case DISPPLANE_RGBX888: 3230 return DRM_FORMAT_XBGR8888; 3231 case DISPPLANE_BGRA888: 3232 return DRM_FORMAT_ARGB8888; 3233 case DISPPLANE_RGBA888: 3234 return DRM_FORMAT_ABGR8888; 3235 case DISPPLANE_BGRX101010: 3236 return DRM_FORMAT_XRGB2101010; 3237 case DISPPLANE_RGBX101010: 3238 return DRM_FORMAT_XBGR2101010; 3239 case DISPPLANE_BGRA101010: 3240 return DRM_FORMAT_ARGB2101010; 3241 case DISPPLANE_RGBA101010: 3242 return DRM_FORMAT_ABGR2101010; 3243 case DISPPLANE_RGBX161616: 3244 return DRM_FORMAT_XBGR16161616F; 3245 } 3246 } 3247 3248 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 3249 { 3250 switch (format) { 3251 case PLANE_CTL_FORMAT_RGB_565: 3252 return DRM_FORMAT_RGB565; 3253 case PLANE_CTL_FORMAT_NV12: 3254 return DRM_FORMAT_NV12; 3255 case PLANE_CTL_FORMAT_P010: 3256 return DRM_FORMAT_P010; 3257 case PLANE_CTL_FORMAT_P012: 3258 return DRM_FORMAT_P012; 3259 case PLANE_CTL_FORMAT_P016: 3260 return DRM_FORMAT_P016; 3261 case PLANE_CTL_FORMAT_Y210: 3262 return DRM_FORMAT_Y210; 3263 case PLANE_CTL_FORMAT_Y212: 3264 return DRM_FORMAT_Y212; 3265 case PLANE_CTL_FORMAT_Y216: 3266 return DRM_FORMAT_Y216; 3267 case PLANE_CTL_FORMAT_Y410: 3268 return DRM_FORMAT_XVYU2101010; 3269 case PLANE_CTL_FORMAT_Y412: 3270 return DRM_FORMAT_XVYU12_16161616; 3271 case PLANE_CTL_FORMAT_Y416: 3272 return DRM_FORMAT_XVYU16161616; 3273 default: 3274 case PLANE_CTL_FORMAT_XRGB_8888: 3275 if (rgb_order) { 3276 if (alpha) 3277 return DRM_FORMAT_ABGR8888; 3278 else 3279 return DRM_FORMAT_XBGR8888; 3280 } else { 3281 if (alpha) 3282 return DRM_FORMAT_ARGB8888; 3283 else 3284 return DRM_FORMAT_XRGB8888; 3285 } 3286 case PLANE_CTL_FORMAT_XRGB_2101010: 3287 if (rgb_order) { 3288 if (alpha) 3289 return DRM_FORMAT_ABGR2101010; 3290 else 3291 return DRM_FORMAT_XBGR2101010; 3292 } else { 3293 if (alpha) 3294 return DRM_FORMAT_ARGB2101010; 3295 else 3296 return DRM_FORMAT_XRGB2101010; 3297 } 3298 case PLANE_CTL_FORMAT_XRGB_16161616F: 3299 if (rgb_order) { 3300 if (alpha) 3301 return DRM_FORMAT_ABGR16161616F; 3302 else 3303 return DRM_FORMAT_XBGR16161616F; 3304 } else { 3305 if (alpha) 3306 return DRM_FORMAT_ARGB16161616F; 3307 else 3308 return DRM_FORMAT_XRGB16161616F; 3309 } 3310 } 3311 } 3312 3313 static bool 3314 
intel_alloc_initial_plane_obj(struct intel_crtc *crtc, 3315 struct intel_initial_plane_config *plane_config) 3316 { 3317 struct drm_device *dev = crtc->base.dev; 3318 struct drm_i915_private *dev_priv = to_i915(dev); 3319 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 3320 struct drm_framebuffer *fb = &plane_config->fb->base; 3321 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); 3322 u32 size_aligned = round_up(plane_config->base + plane_config->size, 3323 PAGE_SIZE); 3324 struct drm_i915_gem_object *obj; 3325 bool ret = false; 3326 3327 size_aligned -= base_aligned; 3328 3329 if (plane_config->size == 0) 3330 return false; 3331 3332 /* If the FB is too big, just don't use it since fbdev is not very 3333 * important and we should probably use that space with FBC or other 3334 * features. */ 3335 if (size_aligned * 2 > dev_priv->stolen_usable_size) 3336 return false; 3337 3338 switch (fb->modifier) { 3339 case DRM_FORMAT_MOD_LINEAR: 3340 case I915_FORMAT_MOD_X_TILED: 3341 case I915_FORMAT_MOD_Y_TILED: 3342 break; 3343 default: 3344 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n", 3345 fb->modifier); 3346 return false; 3347 } 3348 3349 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, 3350 base_aligned, 3351 base_aligned, 3352 size_aligned); 3353 if (IS_ERR(obj)) 3354 return false; 3355 3356 switch (plane_config->tiling) { 3357 case I915_TILING_NONE: 3358 break; 3359 case I915_TILING_X: 3360 case I915_TILING_Y: 3361 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling; 3362 break; 3363 default: 3364 MISSING_CASE(plane_config->tiling); 3365 goto out; 3366 } 3367 3368 mode_cmd.pixel_format = fb->format->format; 3369 mode_cmd.width = fb->width; 3370 mode_cmd.height = fb->height; 3371 mode_cmd.pitches[0] = fb->pitches[0]; 3372 mode_cmd.modifier[0] = fb->modifier; 3373 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 3374 3375 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) { 3376 DRM_DEBUG_KMS("intel fb init failed\n"); 3377 goto out; 3378 } 3379 3380 3381 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); 3382 ret = true; 3383 out: 3384 i915_gem_object_put(obj); 3385 return ret; 3386 } 3387 3388 static void 3389 intel_set_plane_visible(struct intel_crtc_state *crtc_state, 3390 struct intel_plane_state *plane_state, 3391 bool visible) 3392 { 3393 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 3394 3395 plane_state->uapi.visible = visible; 3396 3397 if (visible) 3398 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base); 3399 else 3400 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base); 3401 } 3402 3403 static void fixup_active_planes(struct intel_crtc_state *crtc_state) 3404 { 3405 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 3406 struct drm_plane *plane; 3407 3408 /* 3409 * Active_planes aliases if multiple "primary" or cursor planes 3410 * have been used on the same (or wrong) pipe. plane_mask uses 3411 * unique ids, hence we can use that to reconstruct active_planes. 
3412 */ 3413 crtc_state->active_planes = 0; 3414 3415 drm_for_each_plane_mask(plane, &dev_priv->drm, 3416 crtc_state->uapi.plane_mask) 3417 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); 3418 } 3419 3420 static void intel_plane_disable_noatomic(struct intel_crtc *crtc, 3421 struct intel_plane *plane) 3422 { 3423 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3424 struct intel_crtc_state *crtc_state = 3425 to_intel_crtc_state(crtc->base.state); 3426 struct intel_plane_state *plane_state = 3427 to_intel_plane_state(plane->base.state); 3428 3429 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", 3430 plane->base.base.id, plane->base.name, 3431 crtc->base.base.id, crtc->base.name); 3432 3433 intel_set_plane_visible(crtc_state, plane_state, false); 3434 fixup_active_planes(crtc_state); 3435 crtc_state->data_rate[plane->id] = 0; 3436 crtc_state->min_cdclk[plane->id] = 0; 3437 3438 if (plane->id == PLANE_PRIMARY) 3439 hsw_disable_ips(crtc_state); 3440 3441 /* 3442 * Vblank time updates from the shadow to live plane control register 3443 * are blocked if the memory self-refresh mode is active at that 3444 * moment. So to make sure the plane gets truly disabled, disable 3445 * first the self-refresh mode. The self-refresh enable bit in turn 3446 * will be checked/applied by the HW only at the next frame start 3447 * event which is after the vblank start event, so we need to have a 3448 * wait-for-vblank between disabling the plane and the pipe. 3449 */ 3450 if (HAS_GMCH(dev_priv) && 3451 intel_set_memory_cxsr(dev_priv, false)) 3452 intel_wait_for_vblank(dev_priv, crtc->pipe); 3453 3454 /* 3455 * Gen2 reports pipe underruns whenever all planes are disabled. 3456 * So disable underrun reporting before all the planes get disabled. 3457 */ 3458 if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes) 3459 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 3460 3461 intel_disable_plane(plane, crtc_state); 3462 } 3463 3464 static struct intel_frontbuffer * 3465 to_intel_frontbuffer(struct drm_framebuffer *fb) 3466 { 3467 return fb ? 
to_intel_framebuffer(fb)->frontbuffer : NULL; 3468 } 3469 3470 static void 3471 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 3472 struct intel_initial_plane_config *plane_config) 3473 { 3474 struct drm_device *dev = intel_crtc->base.dev; 3475 struct drm_i915_private *dev_priv = to_i915(dev); 3476 struct drm_crtc *c; 3477 struct drm_plane *primary = intel_crtc->base.primary; 3478 struct drm_plane_state *plane_state = primary->state; 3479 struct intel_plane *intel_plane = to_intel_plane(primary); 3480 struct intel_plane_state *intel_state = 3481 to_intel_plane_state(plane_state); 3482 struct drm_framebuffer *fb; 3483 3484 if (!plane_config->fb) 3485 return; 3486 3487 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { 3488 fb = &plane_config->fb->base; 3489 goto valid_fb; 3490 } 3491 3492 kfree(plane_config->fb); 3493 3494 /* 3495 * Failed to alloc the obj, check to see if we should share 3496 * an fb with another CRTC instead 3497 */ 3498 for_each_crtc(dev, c) { 3499 struct intel_plane_state *state; 3500 3501 if (c == &intel_crtc->base) 3502 continue; 3503 3504 if (!to_intel_crtc(c)->active) 3505 continue; 3506 3507 state = to_intel_plane_state(c->primary->state); 3508 if (!state->vma) 3509 continue; 3510 3511 if (intel_plane_ggtt_offset(state) == plane_config->base) { 3512 fb = state->hw.fb; 3513 drm_framebuffer_get(fb); 3514 goto valid_fb; 3515 } 3516 } 3517 3518 /* 3519 * We've failed to reconstruct the BIOS FB. Current display state 3520 * indicates that the primary plane is visible, but has a NULL FB, 3521 * which will lead to problems later if we don't fix it up. The 3522 * simplest solution is to just disable the primary plane now and 3523 * pretend the BIOS never had it enabled. 3524 */ 3525 intel_plane_disable_noatomic(intel_crtc, intel_plane); 3526 3527 return; 3528 3529 valid_fb: 3530 intel_state->hw.rotation = plane_config->rotation; 3531 intel_fill_fb_ggtt_view(&intel_state->view, fb, 3532 intel_state->hw.rotation); 3533 intel_state->color_plane[0].stride = 3534 intel_fb_pitch(fb, 0, intel_state->hw.rotation); 3535 3536 intel_state->vma = 3537 intel_pin_and_fence_fb_obj(fb, 3538 &intel_state->view, 3539 intel_plane_uses_fence(intel_state), 3540 &intel_state->flags); 3541 if (IS_ERR(intel_state->vma)) { 3542 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", 3543 intel_crtc->pipe, PTR_ERR(intel_state->vma)); 3544 3545 intel_state->vma = NULL; 3546 drm_framebuffer_put(fb); 3547 return; 3548 } 3549 3550 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); 3551 3552 plane_state->src_x = 0; 3553 plane_state->src_y = 0; 3554 plane_state->src_w = fb->width << 16; 3555 plane_state->src_h = fb->height << 16; 3556 3557 plane_state->crtc_x = 0; 3558 plane_state->crtc_y = 0; 3559 plane_state->crtc_w = fb->width; 3560 plane_state->crtc_h = fb->height; 3561 3562 intel_state->uapi.src = drm_plane_state_src(plane_state); 3563 intel_state->uapi.dst = drm_plane_state_dest(plane_state); 3564 3565 if (plane_config->tiling) 3566 dev_priv->preserve_bios_swizzle = true; 3567 3568 plane_state->fb = fb; 3569 plane_state->crtc = &intel_crtc->base; 3570 intel_plane_copy_uapi_to_hw_state(intel_state, intel_state); 3571 3572 atomic_or(to_intel_plane(primary)->frontbuffer_bit, 3573 &to_intel_frontbuffer(fb)->bits); 3574 } 3575 3576 static int skl_max_plane_width(const struct drm_framebuffer *fb, 3577 int color_plane, 3578 unsigned int rotation) 3579 { 3580 int cpp = fb->format->cpp[color_plane]; 3581 3582 switch (fb->modifier) { 3583 case DRM_FORMAT_MOD_LINEAR: 3584 
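/* linear and X-tiled share the same width limits */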
case I915_FORMAT_MOD_X_TILED: 3585 /* 3586 * The validated limit is 4k, but 5k should 3587 * work apart from the following features: 3588 * - Ytile (already limited to 4k) 3589 * - FP16 (already limited to 4k) 3590 * - render compression (already limited to 4k) 3591 * - KVMR sprite and cursor (don't care) 3592 * - horizontal panning (TODO verify this) 3593 * - pipe and plane scaling (TODO verify this) 3594 */ 3595 if (cpp == 8) 3596 return 4096; 3597 else 3598 return 5120; 3599 case I915_FORMAT_MOD_Y_TILED_CCS: 3600 case I915_FORMAT_MOD_Yf_TILED_CCS: 3601 /* FIXME AUX plane? */ 3602 case I915_FORMAT_MOD_Y_TILED: 3603 case I915_FORMAT_MOD_Yf_TILED: 3604 if (cpp == 8) 3605 return 2048; 3606 else 3607 return 4096; 3608 default: 3609 MISSING_CASE(fb->modifier); 3610 return 2048; 3611 } 3612 } 3613 3614 static int glk_max_plane_width(const struct drm_framebuffer *fb, 3615 int color_plane, 3616 unsigned int rotation) 3617 { 3618 int cpp = fb->format->cpp[color_plane]; 3619 3620 switch (fb->modifier) { 3621 case DRM_FORMAT_MOD_LINEAR: 3622 case I915_FORMAT_MOD_X_TILED: 3623 if (cpp == 8) 3624 return 4096; 3625 else 3626 return 5120; 3627 case I915_FORMAT_MOD_Y_TILED_CCS: 3628 case I915_FORMAT_MOD_Yf_TILED_CCS: 3629 /* FIXME AUX plane? */ 3630 case I915_FORMAT_MOD_Y_TILED: 3631 case I915_FORMAT_MOD_Yf_TILED: 3632 if (cpp == 8) 3633 return 2048; 3634 else 3635 return 5120; 3636 default: 3637 MISSING_CASE(fb->modifier); 3638 return 2048; 3639 } 3640 } 3641 3642 static int icl_max_plane_width(const struct drm_framebuffer *fb, 3643 int color_plane, 3644 unsigned int rotation) 3645 { 3646 return 5120; 3647 } 3648 3649 static int skl_max_plane_height(void) 3650 { 3651 return 4096; 3652 } 3653 3654 static int icl_max_plane_height(void) 3655 { 3656 return 4320; 3657 } 3658 3659 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, 3660 int main_x, int main_y, u32 main_offset) 3661 { 3662 const struct drm_framebuffer *fb = plane_state->hw.fb; 3663 int ccs_plane = main_to_ccs_plane(fb, 0); 3664 int aux_x = plane_state->color_plane[ccs_plane].x; 3665 int aux_y = plane_state->color_plane[ccs_plane].y; 3666 u32 aux_offset = plane_state->color_plane[ccs_plane].offset; 3667 u32 alignment = intel_surf_alignment(fb, ccs_plane); 3668 int hsub; 3669 int vsub; 3670 3671 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); 3672 while (aux_offset >= main_offset && aux_y <= main_y) { 3673 int x, y; 3674 3675 if (aux_x == main_x && aux_y == main_y) 3676 break; 3677 3678 if (aux_offset == 0) 3679 break; 3680 3681 x = aux_x / hsub; 3682 y = aux_y / vsub; 3683 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, 3684 plane_state, 3685 ccs_plane, 3686 aux_offset, 3687 aux_offset - 3688 alignment); 3689 aux_x = x * hsub + aux_x % hsub; 3690 aux_y = y * vsub + aux_y % vsub; 3691 } 3692 3693 if (aux_x != main_x || aux_y != main_y) 3694 return false; 3695 3696 plane_state->color_plane[ccs_plane].offset = aux_offset; 3697 plane_state->color_plane[ccs_plane].x = aux_x; 3698 plane_state->color_plane[ccs_plane].y = aux_y; 3699 3700 return true; 3701 } 3702 3703 static int skl_check_main_surface(struct intel_plane_state *plane_state) 3704 { 3705 struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); 3706 const struct drm_framebuffer *fb = plane_state->hw.fb; 3707 unsigned int rotation = plane_state->hw.rotation; 3708 int x = plane_state->uapi.src.x1 >> 16; 3709 int y = plane_state->uapi.src.y1 >> 16; 3710 int w = drm_rect_width(&plane_state->uapi.src) >> 16; 3711 int h =
drm_rect_height(&plane_state->uapi.src) >> 16; 3712 int max_width; 3713 int max_height; 3714 u32 alignment; 3715 u32 offset; 3716 int aux_plane = intel_main_to_aux_plane(fb, 0); 3717 u32 aux_offset = plane_state->color_plane[aux_plane].offset; 3718 3719 if (INTEL_GEN(dev_priv) >= 11) 3720 max_width = icl_max_plane_width(fb, 0, rotation); 3721 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 3722 max_width = glk_max_plane_width(fb, 0, rotation); 3723 else 3724 max_width = skl_max_plane_width(fb, 0, rotation); 3725 3726 if (INTEL_GEN(dev_priv) >= 11) 3727 max_height = icl_max_plane_height(); 3728 else 3729 max_height = skl_max_plane_height(); 3730 3731 if (w > max_width || h > max_height) { 3732 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", 3733 w, h, max_width, max_height); 3734 return -EINVAL; 3735 } 3736 3737 intel_add_fb_offsets(&x, &y, plane_state, 0); 3738 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0); 3739 alignment = intel_surf_alignment(fb, 0); 3740 3741 /* 3742 * AUX surface offset is specified as the distance from the 3743 * main surface offset, and it must be non-negative. Make 3744 * sure that is what we will get. 3745 */ 3746 if (offset > aux_offset) 3747 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3748 offset, aux_offset & ~(alignment - 1)); 3749 3750 /* 3751 * When using an X-tiled surface, the plane blows up 3752 * if the x offset + width exceeds the stride. 3753 * 3754 * TODO: linear and Y-tiled seem fine, Yf untested. 3755 */ 3756 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 3757 int cpp = fb->format->cpp[0]; 3758 3759 while ((x + w) * cpp > plane_state->color_plane[0].stride) { 3760 if (offset == 0) { 3761 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n"); 3762 return -EINVAL; 3763 } 3764 3765 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3766 offset, offset - alignment); 3767 } 3768 } 3769 3770 /* 3771 * CCS AUX surface doesn't have its own x/y offsets, so we must make sure 3772 * they match with the main surface x/y offsets. 3773 */ 3774 if (is_ccs_modifier(fb->modifier)) { 3775 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) { 3776 if (offset == 0) 3777 break; 3778 3779 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3780 offset, offset - alignment); 3781 } 3782 3783 if (x != plane_state->color_plane[aux_plane].x || 3784 y != plane_state->color_plane[aux_plane].y) { 3785 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); 3786 return -EINVAL; 3787 } 3788 } 3789 3790 plane_state->color_plane[0].offset = offset; 3791 plane_state->color_plane[0].x = x; 3792 plane_state->color_plane[0].y = y; 3793 3794 /* 3795 * Put the final coordinates back so that the src 3796 * coordinate checks will see the right values.
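*
* The uapi src rectangle is in 16.16 fixed point, hence the
* x << 16 / y << 16 in the translation below, mirroring the
* >> 16 extraction at the top of this function.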
3797 */ 3798 drm_rect_translate_to(&plane_state->uapi.src, 3799 x << 16, y << 16); 3800 3801 return 0; 3802 } 3803 3804 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 3805 { 3806 const struct drm_framebuffer *fb = plane_state->hw.fb; 3807 unsigned int rotation = plane_state->hw.rotation; 3808 int max_width = skl_max_plane_width(fb, 1, rotation); 3809 int max_height = 4096; 3810 int x = plane_state->uapi.src.x1 >> 17; 3811 int y = plane_state->uapi.src.y1 >> 17; 3812 int w = drm_rect_width(&plane_state->uapi.src) >> 17; 3813 int h = drm_rect_height(&plane_state->uapi.src) >> 17; 3814 u32 offset; 3815 3816 intel_add_fb_offsets(&x, &y, plane_state, 1); 3817 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); 3818 3819 /* FIXME not quite sure how/if these apply to the chroma plane */ 3820 if (w > max_width || h > max_height) { 3821 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", 3822 w, h, max_width, max_height); 3823 return -EINVAL; 3824 } 3825 3826 plane_state->color_plane[1].offset = offset; 3827 plane_state->color_plane[1].x = x; 3828 plane_state->color_plane[1].y = y; 3829 3830 return 0; 3831 } 3832 3833 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) 3834 { 3835 const struct drm_framebuffer *fb = plane_state->hw.fb; 3836 int src_x = plane_state->uapi.src.x1 >> 16; 3837 int src_y = plane_state->uapi.src.y1 >> 16; 3838 int hsub; 3839 int vsub; 3840 int x; 3841 int y; 3842 u32 offset; 3843 3844 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, 1); 3845 x = src_x / hsub; 3846 y = src_y / vsub; 3847 intel_add_fb_offsets(&x, &y, plane_state, 1); 3848 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1); 3849 3850 plane_state->color_plane[1].offset = offset; 3851 plane_state->color_plane[1].x = x * hsub + src_x % hsub; 3852 plane_state->color_plane[1].y = y * vsub + src_y % vsub; 3853 3854 return 0; 3855 } 3856 3857 int skl_check_plane_surface(struct intel_plane_state *plane_state) 3858 { 3859 const struct drm_framebuffer *fb = plane_state->hw.fb; 3860 int ret; 3861 3862 ret = intel_plane_compute_gtt(plane_state); 3863 if (ret) 3864 return ret; 3865 3866 if (!plane_state->uapi.visible) 3867 return 0; 3868 3869 /* 3870 * Handle the AUX surface first since 3871 * the main surface setup depends on it. 3872 */ 3873 if (intel_format_info_is_yuv_semiplanar(fb->format, 3874 fb->modifier)) { 3875 ret = skl_check_nv12_aux_surface(plane_state); 3876 if (ret) 3877 return ret; 3878 } else if (is_ccs_modifier(fb->modifier)) { 3879 ret = skl_check_ccs_aux_surface(plane_state); 3880 if (ret) 3881 return ret; 3882 } else { 3883 plane_state->color_plane[1].offset = ~0xfff; 3884 plane_state->color_plane[1].x = 0; 3885 plane_state->color_plane[1].y = 0; 3886 } 3887 3888 ret = skl_check_main_surface(plane_state); 3889 if (ret) 3890 return ret; 3891 3892 return 0; 3893 } 3894 3895 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state, 3896 const struct intel_plane_state *plane_state, 3897 unsigned int *num, unsigned int *den) 3898 { 3899 const struct drm_framebuffer *fb = plane_state->hw.fb; 3900 unsigned int cpp = fb->format->cpp[0]; 3901 3902 /* 3903 * g4x bspec says 64bpp pixel rate can't exceed 80% 3904 * of cdclk when the sprite plane is enabled on the 3905 * same pipe. ilk/snb bspec says 64bpp pixel rate is 3906 * never allowed to exceed 80% of cdclk. Let's just go 3907 * with the ilk/snb limit always. 
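*
* A quick worked example with illustrative numbers: at a
* 200000 kHz pixel rate and cpp == 8, i9xx_plane_min_cdclk()
* below computes DIV_ROUND_UP(200000 * 10, 8) = 250000 kHz,
* i.e. the pixel rate lands at exactly 80% of cdclk.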
3908 */ 3909 if (cpp == 8) { 3910 *num = 10; 3911 *den = 8; 3912 } else { 3913 *num = 1; 3914 *den = 1; 3915 } 3916 } 3917 3918 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, 3919 const struct intel_plane_state *plane_state) 3920 { 3921 unsigned int pixel_rate; 3922 unsigned int num, den; 3923 3924 /* 3925 * Note that crtc_state->pixel_rate accounts for both 3926 * horizontal and vertical panel fitter downscaling factors. 3927 * Pre-HSW bspec tells us to only consider the horizontal 3928 * downscaling factor here. We ignore that and just consider 3929 * both for simplicity. 3930 */ 3931 pixel_rate = crtc_state->pixel_rate; 3932 3933 i9xx_plane_ratio(crtc_state, plane_state, &num, &den); 3934 3935 /* two pixels per clock with double wide pipe */ 3936 if (crtc_state->double_wide) 3937 den *= 2; 3938 3939 return DIV_ROUND_UP(pixel_rate * num, den); 3940 } 3941 3942 unsigned int 3943 i9xx_plane_max_stride(struct intel_plane *plane, 3944 u32 pixel_format, u64 modifier, 3945 unsigned int rotation) 3946 { 3947 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3948 3949 if (!HAS_GMCH(dev_priv)) { 3950 return 32*1024; 3951 } else if (INTEL_GEN(dev_priv) >= 4) { 3952 if (modifier == I915_FORMAT_MOD_X_TILED) 3953 return 16*1024; 3954 else 3955 return 32*1024; 3956 } else if (INTEL_GEN(dev_priv) >= 3) { 3957 if (modifier == I915_FORMAT_MOD_X_TILED) 3958 return 8*1024; 3959 else 3960 return 16*1024; 3961 } else { 3962 if (plane->i9xx_plane == PLANE_C) 3963 return 4*1024; 3964 else 3965 return 8*1024; 3966 } 3967 } 3968 3969 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 3970 { 3971 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3972 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3973 u32 dspcntr = 0; 3974 3975 if (crtc_state->gamma_enable) 3976 dspcntr |= DISPPLANE_GAMMA_ENABLE; 3977 3978 if (crtc_state->csc_enable) 3979 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 3980 3981 if (INTEL_GEN(dev_priv) < 5) 3982 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 3983 3984 return dspcntr; 3985 } 3986 3987 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 3988 const struct intel_plane_state *plane_state) 3989 { 3990 struct drm_i915_private *dev_priv = 3991 to_i915(plane_state->uapi.plane->dev); 3992 const struct drm_framebuffer *fb = plane_state->hw.fb; 3993 unsigned int rotation = plane_state->hw.rotation; 3994 u32 dspcntr; 3995 3996 dspcntr = DISPLAY_PLANE_ENABLE; 3997 3998 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) || 3999 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 4000 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 4001 4002 switch (fb->format->format) { 4003 case DRM_FORMAT_C8: 4004 dspcntr |= DISPPLANE_8BPP; 4005 break; 4006 case DRM_FORMAT_XRGB1555: 4007 dspcntr |= DISPPLANE_BGRX555; 4008 break; 4009 case DRM_FORMAT_ARGB1555: 4010 dspcntr |= DISPPLANE_BGRA555; 4011 break; 4012 case DRM_FORMAT_RGB565: 4013 dspcntr |= DISPPLANE_BGRX565; 4014 break; 4015 case DRM_FORMAT_XRGB8888: 4016 dspcntr |= DISPPLANE_BGRX888; 4017 break; 4018 case DRM_FORMAT_XBGR8888: 4019 dspcntr |= DISPPLANE_RGBX888; 4020 break; 4021 case DRM_FORMAT_ARGB8888: 4022 dspcntr |= DISPPLANE_BGRA888; 4023 break; 4024 case DRM_FORMAT_ABGR8888: 4025 dspcntr |= DISPPLANE_RGBA888; 4026 break; 4027 case DRM_FORMAT_XRGB2101010: 4028 dspcntr |= DISPPLANE_BGRX101010; 4029 break; 4030 case DRM_FORMAT_XBGR2101010: 4031 dspcntr |= DISPPLANE_RGBX101010; 4032 break; 4033 case DRM_FORMAT_ARGB2101010: 4034 dspcntr |= DISPPLANE_BGRA101010; 4035 break; 
4036 case DRM_FORMAT_ABGR2101010: 4037 dspcntr |= DISPPLANE_RGBA101010; 4038 break; 4039 case DRM_FORMAT_XBGR16161616F: 4040 dspcntr |= DISPPLANE_RGBX161616; 4041 break; 4042 default: 4043 MISSING_CASE(fb->format->format); 4044 return 0; 4045 } 4046 4047 if (INTEL_GEN(dev_priv) >= 4 && 4048 fb->modifier == I915_FORMAT_MOD_X_TILED) 4049 dspcntr |= DISPPLANE_TILED; 4050 4051 if (rotation & DRM_MODE_ROTATE_180) 4052 dspcntr |= DISPPLANE_ROTATE_180; 4053 4054 if (rotation & DRM_MODE_REFLECT_X) 4055 dspcntr |= DISPPLANE_MIRROR; 4056 4057 return dspcntr; 4058 } 4059 4060 int i9xx_check_plane_surface(struct intel_plane_state *plane_state) 4061 { 4062 struct drm_i915_private *dev_priv = 4063 to_i915(plane_state->uapi.plane->dev); 4064 const struct drm_framebuffer *fb = plane_state->hw.fb; 4065 int src_x, src_y, src_w; 4066 u32 offset; 4067 int ret; 4068 4069 ret = intel_plane_compute_gtt(plane_state); 4070 if (ret) 4071 return ret; 4072 4073 if (!plane_state->uapi.visible) 4074 return 0; 4075 4076 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 4077 src_x = plane_state->uapi.src.x1 >> 16; 4078 src_y = plane_state->uapi.src.y1 >> 16; 4079 4080 /* Undocumented hardware limit on i965/g4x/vlv/chv */ 4081 if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) 4082 return -EINVAL; 4083 4084 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 4085 4086 if (INTEL_GEN(dev_priv) >= 4) 4087 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 4088 plane_state, 0); 4089 else 4090 offset = 0; 4091 4092 /* 4093 * Put the final coordinates back so that the src 4094 * coordinate checks will see the right values. 4095 */ 4096 drm_rect_translate_to(&plane_state->uapi.src, 4097 src_x << 16, src_y << 16); 4098 4099 /* HSW/BDW do this automagically in hardware */ 4100 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 4101 unsigned int rotation = plane_state->hw.rotation; 4102 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 4103 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 4104 4105 if (rotation & DRM_MODE_ROTATE_180) { 4106 src_x += src_w - 1; 4107 src_y += src_h - 1; 4108 } else if (rotation & DRM_MODE_REFLECT_X) { 4109 src_x += src_w - 1; 4110 } 4111 } 4112 4113 plane_state->color_plane[0].offset = offset; 4114 plane_state->color_plane[0].x = src_x; 4115 plane_state->color_plane[0].y = src_y; 4116 4117 return 0; 4118 } 4119 4120 static bool i9xx_plane_has_windowing(struct intel_plane *plane) 4121 { 4122 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4123 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4124 4125 if (IS_CHERRYVIEW(dev_priv)) 4126 return i9xx_plane == PLANE_B; 4127 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 4128 return false; 4129 else if (IS_GEN(dev_priv, 4)) 4130 return i9xx_plane == PLANE_C; 4131 else 4132 return i9xx_plane == PLANE_B || 4133 i9xx_plane == PLANE_C; 4134 } 4135 4136 static int 4137 i9xx_plane_check(struct intel_crtc_state *crtc_state, 4138 struct intel_plane_state *plane_state) 4139 { 4140 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 4141 int ret; 4142 4143 ret = chv_plane_check_rotation(plane_state); 4144 if (ret) 4145 return ret; 4146 4147 ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, 4148 &crtc_state->uapi, 4149 DRM_PLANE_HELPER_NO_SCALING, 4150 DRM_PLANE_HELPER_NO_SCALING, 4151 i9xx_plane_has_windowing(plane), 4152 true); 4153 if (ret) 4154 return ret; 4155 4156 ret = i9xx_check_plane_surface(plane_state); 4157 if (ret) 4158 return ret; 4159 4160 if 
(!plane_state->uapi.visible) 4161 return 0; 4162 4163 ret = intel_plane_check_src_coordinates(plane_state); 4164 if (ret) 4165 return ret; 4166 4167 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); 4168 4169 return 0; 4170 } 4171 4172 static void i9xx_update_plane(struct intel_plane *plane, 4173 const struct intel_crtc_state *crtc_state, 4174 const struct intel_plane_state *plane_state) 4175 { 4176 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4177 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4178 u32 linear_offset; 4179 int x = plane_state->color_plane[0].x; 4180 int y = plane_state->color_plane[0].y; 4181 int crtc_x = plane_state->uapi.dst.x1; 4182 int crtc_y = plane_state->uapi.dst.y1; 4183 int crtc_w = drm_rect_width(&plane_state->uapi.dst); 4184 int crtc_h = drm_rect_height(&plane_state->uapi.dst); 4185 unsigned long irqflags; 4186 u32 dspaddr_offset; 4187 u32 dspcntr; 4188 4189 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); 4190 4191 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 4192 4193 if (INTEL_GEN(dev_priv) >= 4) 4194 dspaddr_offset = plane_state->color_plane[0].offset; 4195 else 4196 dspaddr_offset = linear_offset; 4197 4198 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 4199 4200 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); 4201 4202 if (INTEL_GEN(dev_priv) < 4) { 4203 /* 4204 * PLANE_A doesn't actually have a full window 4205 * generator but let's assume we still need to 4206 * program whatever is there. 4207 */ 4208 I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 4209 I915_WRITE_FW(DSPSIZE(i9xx_plane), 4210 ((crtc_h - 1) << 16) | (crtc_w - 1)); 4211 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { 4212 I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 4213 I915_WRITE_FW(PRIMSIZE(i9xx_plane), 4214 ((crtc_h - 1) << 16) | (crtc_w - 1)); 4215 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); 4216 } 4217 4218 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 4219 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x); 4220 } else if (INTEL_GEN(dev_priv) >= 4) { 4221 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset); 4222 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x); 4223 } 4224 4225 /* 4226 * The control register self-arms if the plane was previously 4227 * disabled. Try to make the plane enable atomic by writing 4228 * the control register just before the surface register. 4229 */ 4230 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 4231 if (INTEL_GEN(dev_priv) >= 4) 4232 I915_WRITE_FW(DSPSURF(i9xx_plane), 4233 intel_plane_ggtt_offset(plane_state) + 4234 dspaddr_offset); 4235 else 4236 I915_WRITE_FW(DSPADDR(i9xx_plane), 4237 intel_plane_ggtt_offset(plane_state) + 4238 dspaddr_offset); 4239 4240 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 4241 } 4242 4243 static void i9xx_disable_plane(struct intel_plane *plane, 4244 const struct intel_crtc_state *crtc_state) 4245 { 4246 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4247 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4248 unsigned long irqflags; 4249 u32 dspcntr; 4250 4251 /* 4252 * DSPCNTR pipe gamma enable on g4x+ and pipe csc 4253 * enable on ilk+ affect the pipe bottom color as 4254 * well, so we must configure them even if the plane 4255 * is disabled. 4256 * 4257 * On pre-g4x there is no way to gamma correct the 4258 * pipe bottom color but we'll keep on doing this 4259 * anyway so that the crtc state readout works correctly. 
4260 */ 4261 dspcntr = i9xx_plane_ctl_crtc(crtc_state); 4262 4263 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 4264 4265 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 4266 if (INTEL_GEN(dev_priv) >= 4) 4267 I915_WRITE_FW(DSPSURF(i9xx_plane), 0); 4268 else 4269 I915_WRITE_FW(DSPADDR(i9xx_plane), 0); 4270 4271 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 4272 } 4273 4274 static bool i9xx_plane_get_hw_state(struct intel_plane *plane, 4275 enum pipe *pipe) 4276 { 4277 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4278 enum intel_display_power_domain power_domain; 4279 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4280 intel_wakeref_t wakeref; 4281 bool ret; 4282 u32 val; 4283 4284 /* 4285 * Not 100% correct for planes that can move between pipes, 4286 * but that's only the case for gen2-4 which don't have any 4287 * display power wells. 4288 */ 4289 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 4290 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 4291 if (!wakeref) 4292 return false; 4293 4294 val = I915_READ(DSPCNTR(i9xx_plane)); 4295 4296 ret = val & DISPLAY_PLANE_ENABLE; 4297 4298 if (INTEL_GEN(dev_priv) >= 5) 4299 *pipe = plane->pipe; 4300 else 4301 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 4302 DISPPLANE_SEL_PIPE_SHIFT; 4303 4304 intel_display_power_put(dev_priv, power_domain, wakeref); 4305 4306 return ret; 4307 } 4308 4309 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 4310 { 4311 struct drm_device *dev = intel_crtc->base.dev; 4312 struct drm_i915_private *dev_priv = to_i915(dev); 4313 4314 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 4315 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 4316 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 4317 } 4318 4319 /* 4320 * This function detaches (aka. unbinds) unused scalers in hardware 4321 */ 4322 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) 4323 { 4324 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 4325 const struct intel_crtc_scaler_state *scaler_state = 4326 &crtc_state->scaler_state; 4327 int i; 4328 4329 /* loop through and disable scalers that aren't in use */ 4330 for (i = 0; i < intel_crtc->num_scalers; i++) { 4331 if (!scaler_state->scalers[i].in_use) 4332 skl_detach_scaler(intel_crtc, i); 4333 } 4334 } 4335 4336 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, 4337 int color_plane, unsigned int rotation) 4338 { 4339 /* 4340 * The stride is either expressed as a multiple of 64-byte chunks for 4341 * linear buffers or in number of tiles for tiled buffers.
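*
* For example (strides chosen purely for illustration): a linear
* fb with a 16384 byte stride is programmed as 16384 / 64 = 256
* chunks, while an X-tiled fb (512 bytes per tile row) is
* programmed as 16384 / 512 = 32 tiles.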
4342 */ 4343 if (is_surface_linear(fb, color_plane)) 4344 return 64; 4345 else if (drm_rotation_90_or_270(rotation)) 4346 return intel_tile_height(fb, color_plane); 4347 else 4348 return intel_tile_width_bytes(fb, color_plane); 4349 } 4350 4351 u32 skl_plane_stride(const struct intel_plane_state *plane_state, 4352 int color_plane) 4353 { 4354 const struct drm_framebuffer *fb = plane_state->hw.fb; 4355 unsigned int rotation = plane_state->hw.rotation; 4356 u32 stride = plane_state->color_plane[color_plane].stride; 4357 4358 if (color_plane >= fb->format->num_planes) 4359 return 0; 4360 4361 return stride / skl_plane_stride_mult(fb, color_plane, rotation); 4362 } 4363 4364 static u32 skl_plane_ctl_format(u32 pixel_format) 4365 { 4366 switch (pixel_format) { 4367 case DRM_FORMAT_C8: 4368 return PLANE_CTL_FORMAT_INDEXED; 4369 case DRM_FORMAT_RGB565: 4370 return PLANE_CTL_FORMAT_RGB_565; 4371 case DRM_FORMAT_XBGR8888: 4372 case DRM_FORMAT_ABGR8888: 4373 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 4374 case DRM_FORMAT_XRGB8888: 4375 case DRM_FORMAT_ARGB8888: 4376 return PLANE_CTL_FORMAT_XRGB_8888; 4377 case DRM_FORMAT_XBGR2101010: 4378 case DRM_FORMAT_ABGR2101010: 4379 return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; 4380 case DRM_FORMAT_XRGB2101010: 4381 case DRM_FORMAT_ARGB2101010: 4382 return PLANE_CTL_FORMAT_XRGB_2101010; 4383 case DRM_FORMAT_XBGR16161616F: 4384 case DRM_FORMAT_ABGR16161616F: 4385 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; 4386 case DRM_FORMAT_XRGB16161616F: 4387 case DRM_FORMAT_ARGB16161616F: 4388 return PLANE_CTL_FORMAT_XRGB_16161616F; 4389 case DRM_FORMAT_YUYV: 4390 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 4391 case DRM_FORMAT_YVYU: 4392 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 4393 case DRM_FORMAT_UYVY: 4394 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; 4395 case DRM_FORMAT_VYUY: 4396 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 4397 case DRM_FORMAT_NV12: 4398 return PLANE_CTL_FORMAT_NV12; 4399 case DRM_FORMAT_P010: 4400 return PLANE_CTL_FORMAT_P010; 4401 case DRM_FORMAT_P012: 4402 return PLANE_CTL_FORMAT_P012; 4403 case DRM_FORMAT_P016: 4404 return PLANE_CTL_FORMAT_P016; 4405 case DRM_FORMAT_Y210: 4406 return PLANE_CTL_FORMAT_Y210; 4407 case DRM_FORMAT_Y212: 4408 return PLANE_CTL_FORMAT_Y212; 4409 case DRM_FORMAT_Y216: 4410 return PLANE_CTL_FORMAT_Y216; 4411 case DRM_FORMAT_XVYU2101010: 4412 return PLANE_CTL_FORMAT_Y410; 4413 case DRM_FORMAT_XVYU12_16161616: 4414 return PLANE_CTL_FORMAT_Y412; 4415 case DRM_FORMAT_XVYU16161616: 4416 return PLANE_CTL_FORMAT_Y416; 4417 default: 4418 MISSING_CASE(pixel_format); 4419 } 4420 4421 return 0; 4422 } 4423 4424 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) 4425 { 4426 if (!plane_state->hw.fb->format->has_alpha) 4427 return PLANE_CTL_ALPHA_DISABLE; 4428 4429 switch (plane_state->hw.pixel_blend_mode) { 4430 case DRM_MODE_BLEND_PIXEL_NONE: 4431 return PLANE_CTL_ALPHA_DISABLE; 4432 case DRM_MODE_BLEND_PREMULTI: 4433 return PLANE_CTL_ALPHA_SW_PREMULTIPLY; 4434 case DRM_MODE_BLEND_COVERAGE: 4435 return PLANE_CTL_ALPHA_HW_PREMULTIPLY; 4436 default: 4437 MISSING_CASE(plane_state->hw.pixel_blend_mode); 4438 return PLANE_CTL_ALPHA_DISABLE; 4439 } 4440 } 4441 4442 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) 4443 { 4444 if (!plane_state->hw.fb->format->has_alpha) 4445 return PLANE_COLOR_ALPHA_DISABLE; 4446 4447 switch (plane_state->hw.pixel_blend_mode) { 4448 case 
DRM_MODE_BLEND_PIXEL_NONE: 4449 return PLANE_COLOR_ALPHA_DISABLE; 4450 case DRM_MODE_BLEND_PREMULTI: 4451 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; 4452 case DRM_MODE_BLEND_COVERAGE: 4453 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY; 4454 default: 4455 MISSING_CASE(plane_state->hw.pixel_blend_mode); 4456 return PLANE_COLOR_ALPHA_DISABLE; 4457 } 4458 } 4459 4460 static u32 skl_plane_ctl_tiling(u64 fb_modifier) 4461 { 4462 switch (fb_modifier) { 4463 case DRM_FORMAT_MOD_LINEAR: 4464 break; 4465 case I915_FORMAT_MOD_X_TILED: 4466 return PLANE_CTL_TILED_X; 4467 case I915_FORMAT_MOD_Y_TILED: 4468 return PLANE_CTL_TILED_Y; 4469 case I915_FORMAT_MOD_Y_TILED_CCS: 4470 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; 4471 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 4472 return PLANE_CTL_TILED_Y | 4473 PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | 4474 PLANE_CTL_CLEAR_COLOR_DISABLE; 4475 case I915_FORMAT_MOD_Yf_TILED: 4476 return PLANE_CTL_TILED_YF; 4477 case I915_FORMAT_MOD_Yf_TILED_CCS: 4478 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; 4479 default: 4480 MISSING_CASE(fb_modifier); 4481 } 4482 4483 return 0; 4484 } 4485 4486 static u32 skl_plane_ctl_rotate(unsigned int rotate) 4487 { 4488 switch (rotate) { 4489 case DRM_MODE_ROTATE_0: 4490 break; 4491 /* 4492 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr, 4493 * while i915 HW rotation is clockwise; that's why we swap the values here. 4494 */ 4495 case DRM_MODE_ROTATE_90: 4496 return PLANE_CTL_ROTATE_270; 4497 case DRM_MODE_ROTATE_180: 4498 return PLANE_CTL_ROTATE_180; 4499 case DRM_MODE_ROTATE_270: 4500 return PLANE_CTL_ROTATE_90; 4501 default: 4502 MISSING_CASE(rotate); 4503 } 4504 4505 return 0; 4506 } 4507 4508 static u32 cnl_plane_ctl_flip(unsigned int reflect) 4509 { 4510 switch (reflect) { 4511 case 0: 4512 break; 4513 case DRM_MODE_REFLECT_X: 4514 return PLANE_CTL_FLIP_HORIZONTAL; 4515 case DRM_MODE_REFLECT_Y: 4516 default: 4517 MISSING_CASE(reflect); 4518 } 4519 4520 return 0; 4521 } 4522 4523 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 4524 { 4525 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 4526 u32 plane_ctl = 0; 4527 4528 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 4529 return plane_ctl; 4530 4531 if (crtc_state->gamma_enable) 4532 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; 4533 4534 if (crtc_state->csc_enable) 4535 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; 4536 4537 return plane_ctl; 4538 } 4539 4540 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, 4541 const struct intel_plane_state *plane_state) 4542 { 4543 struct drm_i915_private *dev_priv = 4544 to_i915(plane_state->uapi.plane->dev); 4545 const struct drm_framebuffer *fb = plane_state->hw.fb; 4546 unsigned int rotation = plane_state->hw.rotation; 4547 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 4548 u32 plane_ctl; 4549 4550 plane_ctl = PLANE_CTL_ENABLE; 4551 4552 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { 4553 plane_ctl |= skl_plane_ctl_alpha(plane_state); 4554 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; 4555 4556 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) 4557 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709; 4558 4559 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4560 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE; 4561 } 4562 4563 plane_ctl |= skl_plane_ctl_format(fb->format->format); 4564 plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 4565 plane_ctl |= skl_plane_ctl_rotate(rotation &
DRM_MODE_ROTATE_MASK); 4566 4567 if (INTEL_GEN(dev_priv) >= 10) 4568 plane_ctl |= cnl_plane_ctl_flip(rotation & 4569 DRM_MODE_REFLECT_MASK); 4570 4571 if (key->flags & I915_SET_COLORKEY_DESTINATION) 4572 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; 4573 else if (key->flags & I915_SET_COLORKEY_SOURCE) 4574 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 4575 4576 return plane_ctl; 4577 } 4578 4579 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) 4580 { 4581 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 4582 u32 plane_color_ctl = 0; 4583 4584 if (INTEL_GEN(dev_priv) >= 11) 4585 return plane_color_ctl; 4586 4587 if (crtc_state->gamma_enable) 4588 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; 4589 4590 if (crtc_state->csc_enable) 4591 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; 4592 4593 return plane_color_ctl; 4594 } 4595 4596 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, 4597 const struct intel_plane_state *plane_state) 4598 { 4599 struct drm_i915_private *dev_priv = 4600 to_i915(plane_state->uapi.plane->dev); 4601 const struct drm_framebuffer *fb = plane_state->hw.fb; 4602 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 4603 u32 plane_color_ctl = 0; 4604 4605 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; 4606 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); 4607 4608 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) { 4609 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) 4610 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; 4611 else 4612 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709; 4613 4614 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4615 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 4616 } else if (fb->format->is_yuv) { 4617 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; 4618 } 4619 4620 return plane_color_ctl; 4621 } 4622 4623 static int 4624 __intel_display_resume(struct drm_device *dev, 4625 struct drm_atomic_state *state, 4626 struct drm_modeset_acquire_ctx *ctx) 4627 { 4628 struct drm_crtc_state *crtc_state; 4629 struct drm_crtc *crtc; 4630 int i, ret; 4631 4632 intel_modeset_setup_hw_state(dev, ctx); 4633 intel_vga_redisable(to_i915(dev)); 4634 4635 if (!state) 4636 return 0; 4637 4638 /* 4639 * We've duplicated the state; pointers to the old state are now invalid. 4640 * 4641 * Don't attempt to use the old state until we commit the duplicated state. 4642 */ 4643 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 4644 /* 4645 * Force recalculation even if we restore 4646 * the current state. With fast modeset this may not result 4647 * in a modeset when the state is compatible.
4648 */ 4649 crtc_state->mode_changed = true; 4650 } 4651 4652 /* ignore any reset values/BIOS leftovers in the WM registers */ 4653 if (!HAS_GMCH(to_i915(dev))) 4654 to_intel_atomic_state(state)->skip_intermediate_wm = true; 4655 4656 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 4657 4658 WARN_ON(ret == -EDEADLK); 4659 return ret; 4660 } 4661 4662 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 4663 { 4664 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && 4665 intel_has_gpu_reset(&dev_priv->gt)); 4666 } 4667 4668 void intel_prepare_reset(struct drm_i915_private *dev_priv) 4669 { 4670 struct drm_device *dev = &dev_priv->drm; 4671 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4672 struct drm_atomic_state *state; 4673 int ret; 4674 4675 /* reset doesn't touch the display */ 4676 if (!i915_modparams.force_reset_modeset_test && 4677 !gpu_reset_clobbers_display(dev_priv)) 4678 return; 4679 4680 /* We have a modeset vs reset deadlock, defensively unbreak it. */ 4681 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4682 smp_mb__after_atomic(); 4683 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); 4684 4685 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { 4686 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n"); 4687 intel_gt_set_wedged(&dev_priv->gt); 4688 } 4689 4690 /* 4691 * Need mode_config.mutex so that we don't 4692 * trample ongoing ->detect() and whatnot. 4693 */ 4694 mutex_lock(&dev->mode_config.mutex); 4695 drm_modeset_acquire_init(ctx, 0); 4696 while (1) { 4697 ret = drm_modeset_lock_all_ctx(dev, ctx); 4698 if (ret != -EDEADLK) 4699 break; 4700 4701 drm_modeset_backoff(ctx); 4702 } 4703 /* 4704 * Disabling the crtcs gracefully seems nicer. Also the 4705 * g33 docs say we should at least disable all the planes. 4706 */ 4707 state = drm_atomic_helper_duplicate_state(dev, ctx); 4708 if (IS_ERR(state)) { 4709 ret = PTR_ERR(state); 4710 DRM_ERROR("Duplicating state failed with %i\n", ret); 4711 return; 4712 } 4713 4714 ret = drm_atomic_helper_disable_all(dev, ctx); 4715 if (ret) { 4716 DRM_ERROR("Suspending crtcs failed with %i\n", ret); 4717 drm_atomic_state_put(state); 4718 return; 4719 } 4720 4721 dev_priv->modeset_restore_state = state; 4722 state->acquire_ctx = ctx; 4723 } 4724 4725 void intel_finish_reset(struct drm_i915_private *dev_priv) 4726 { 4727 struct drm_device *dev = &dev_priv->drm; 4728 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4729 struct drm_atomic_state *state; 4730 int ret; 4731 4732 /* reset doesn't touch the display */ 4733 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 4734 return; 4735 4736 state = fetch_and_zero(&dev_priv->modeset_restore_state); 4737 if (!state) 4738 goto unlock; 4739 4740 /* reset doesn't touch the display */ 4741 if (!gpu_reset_clobbers_display(dev_priv)) { 4742 /* for testing only restore the display */ 4743 ret = __intel_display_resume(dev, state, ctx); 4744 if (ret) 4745 DRM_ERROR("Restoring old state failed with %i\n", ret); 4746 } else { 4747 /* 4748 * The display has been reset as well, 4749 * so we need a full re-initialization.
4750 */ 4751 intel_pps_unlock_regs_wa(dev_priv); 4752 intel_modeset_init_hw(dev_priv); 4753 intel_init_clock_gating(dev_priv); 4754 4755 spin_lock_irq(&dev_priv->irq_lock); 4756 if (dev_priv->display.hpd_irq_setup) 4757 dev_priv->display.hpd_irq_setup(dev_priv); 4758 spin_unlock_irq(&dev_priv->irq_lock); 4759 4760 ret = __intel_display_resume(dev, state, ctx); 4761 if (ret) 4762 DRM_ERROR("Restoring old state failed with %i\n", ret); 4763 4764 intel_hpd_init(dev_priv); 4765 } 4766 4767 drm_atomic_state_put(state); 4768 unlock: 4769 drm_modeset_drop_locks(ctx); 4770 drm_modeset_acquire_fini(ctx); 4771 mutex_unlock(&dev->mode_config.mutex); 4772 4773 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4774 } 4775 4776 static void icl_set_pipe_chicken(struct intel_crtc *crtc) 4777 { 4778 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4779 enum pipe pipe = crtc->pipe; 4780 u32 tmp; 4781 4782 tmp = I915_READ(PIPE_CHICKEN(pipe)); 4783 4784 /* 4785 * Display WA #1153: icl 4786 * enable hardware to bypass the alpha math 4787 * and rounding for per-pixel values 00 and 0xff 4788 */ 4789 tmp |= PER_PIXEL_ALPHA_BYPASS_EN; 4790 /* 4791 * Display WA #1605353570: icl 4792 * Set the pixel rounding bit to 1 to allow 4793 * passthrough of framebuffer pixels unmodified 4794 * across the pipe 4795 */ 4796 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; 4797 I915_WRITE(PIPE_CHICKEN(pipe), tmp); 4798 } 4799 4800 static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state) 4801 { 4802 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4803 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4804 u32 trans_ddi_func_ctl2_val; 4805 u8 master_select; 4806 4807 /* 4808 * Configure the master select and enable Transcoder Port Sync for the 4809 * slave CRTC's transcoder.
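*
* The encoding used below maps the EDP transcoder to master
* select 0 and transcoders A/B/C to 1/2/3, hence the
* master_transcoder + 1 for the non-EDP transcoders.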
4810 */ 4811 if (crtc_state->master_transcoder == INVALID_TRANSCODER) 4812 return; 4813 4814 if (crtc_state->master_transcoder == TRANSCODER_EDP) 4815 master_select = 0; 4816 else 4817 master_select = crtc_state->master_transcoder + 1; 4818 4819 /* Set the master select bits for Transcoder Port Sync */ 4820 trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) & 4821 PORT_SYNC_MODE_MASTER_SELECT_MASK) << 4822 PORT_SYNC_MODE_MASTER_SELECT_SHIFT; 4823 /* Enable Transcoder Port Sync */ 4824 trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE; 4825 4826 I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder), 4827 trans_ddi_func_ctl2_val); 4828 } 4829 4830 static void intel_fdi_normal_train(struct intel_crtc *crtc) 4831 { 4832 struct drm_device *dev = crtc->base.dev; 4833 struct drm_i915_private *dev_priv = to_i915(dev); 4834 enum pipe pipe = crtc->pipe; 4835 i915_reg_t reg; 4836 u32 temp; 4837 4838 /* enable normal train */ 4839 reg = FDI_TX_CTL(pipe); 4840 temp = I915_READ(reg); 4841 if (IS_IVYBRIDGE(dev_priv)) { 4842 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4843 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 4844 } else { 4845 temp &= ~FDI_LINK_TRAIN_NONE; 4846 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 4847 } 4848 I915_WRITE(reg, temp); 4849 4850 reg = FDI_RX_CTL(pipe); 4851 temp = I915_READ(reg); 4852 if (HAS_PCH_CPT(dev_priv)) { 4853 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4854 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 4855 } else { 4856 temp &= ~FDI_LINK_TRAIN_NONE; 4857 temp |= FDI_LINK_TRAIN_NONE; 4858 } 4859 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 4860 4861 /* wait one idle pattern time */ 4862 POSTING_READ(reg); 4863 udelay(1000); 4864 4865 /* IVB wants error correction enabled */ 4866 if (IS_IVYBRIDGE(dev_priv)) 4867 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 4868 FDI_FE_ERRC_ENABLE); 4869 } 4870 4871 /* The FDI link training functions for ILK/Ibexpeak.
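*
* Training runs in two phases: pattern 1 is transmitted until
* FDI_RX_IIR reports bit lock, then pattern 2 until it reports
* symbol lock, polling the IIR up to five times per phase.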
*/ 4872 static void ironlake_fdi_link_train(struct intel_crtc *crtc, 4873 const struct intel_crtc_state *crtc_state) 4874 { 4875 struct drm_device *dev = crtc->base.dev; 4876 struct drm_i915_private *dev_priv = to_i915(dev); 4877 enum pipe pipe = crtc->pipe; 4878 i915_reg_t reg; 4879 u32 temp, tries; 4880 4881 /* FDI needs bits from pipe first */ 4882 assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder); 4883 4884 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit 4885 for train result */ 4886 reg = FDI_RX_IMR(pipe); 4887 temp = I915_READ(reg); 4888 temp &= ~FDI_RX_SYMBOL_LOCK; 4889 temp &= ~FDI_RX_BIT_LOCK; 4890 I915_WRITE(reg, temp); 4891 I915_READ(reg); 4892 udelay(150); 4893 4894 /* enable CPU FDI TX and PCH FDI RX */ 4895 reg = FDI_TX_CTL(pipe); 4896 temp = I915_READ(reg); 4897 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4898 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4899 temp &= ~FDI_LINK_TRAIN_NONE; 4900 temp |= FDI_LINK_TRAIN_PATTERN_1; 4901 I915_WRITE(reg, temp | FDI_TX_ENABLE); 4902 4903 reg = FDI_RX_CTL(pipe); 4904 temp = I915_READ(reg); 4905 temp &= ~FDI_LINK_TRAIN_NONE; 4906 temp |= FDI_LINK_TRAIN_PATTERN_1; 4907 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4908 4909 POSTING_READ(reg); 4910 udelay(150); 4911 4912 /* Ironlake workaround, enable clock pointer after FDI enable */ 4913 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 4914 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 4915 FDI_RX_PHASE_SYNC_POINTER_EN); 4916 4917 reg = FDI_RX_IIR(pipe); 4918 for (tries = 0; tries < 5; tries++) { 4919 temp = I915_READ(reg); 4920 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4921 4922 if ((temp & FDI_RX_BIT_LOCK)) { 4923 DRM_DEBUG_KMS("FDI train 1 done.\n"); 4924 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4925 break; 4926 } 4927 } 4928 if (tries == 5) 4929 DRM_ERROR("FDI train 1 fail!\n"); 4930 4931 /* Train 2 */ 4932 reg = FDI_TX_CTL(pipe); 4933 temp = I915_READ(reg); 4934 temp &= ~FDI_LINK_TRAIN_NONE; 4935 temp |= FDI_LINK_TRAIN_PATTERN_2; 4936 I915_WRITE(reg, temp); 4937 4938 reg = FDI_RX_CTL(pipe); 4939 temp = I915_READ(reg); 4940 temp &= ~FDI_LINK_TRAIN_NONE; 4941 temp |= FDI_LINK_TRAIN_PATTERN_2; 4942 I915_WRITE(reg, temp); 4943 4944 POSTING_READ(reg); 4945 udelay(150); 4946 4947 reg = FDI_RX_IIR(pipe); 4948 for (tries = 0; tries < 5; tries++) { 4949 temp = I915_READ(reg); 4950 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4951 4952 if (temp & FDI_RX_SYMBOL_LOCK) { 4953 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4954 DRM_DEBUG_KMS("FDI train 2 done.\n"); 4955 break; 4956 } 4957 } 4958 if (tries == 5) 4959 DRM_ERROR("FDI train 2 fail!\n"); 4960 4961 DRM_DEBUG_KMS("FDI train done\n"); 4962 4963 } 4964 4965 static const int snb_b_fdi_train_param[] = { 4966 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 4967 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 4968 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 4969 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 4970 }; 4971 4972 /* The FDI link training functions for SNB/Cougarpoint.
*/ 4973 static void gen6_fdi_link_train(struct intel_crtc *crtc, 4974 const struct intel_crtc_state *crtc_state) 4975 { 4976 struct drm_device *dev = crtc->base.dev; 4977 struct drm_i915_private *dev_priv = to_i915(dev); 4978 enum pipe pipe = crtc->pipe; 4979 i915_reg_t reg; 4980 u32 temp, i, retry; 4981 4982 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit 4983 for train result */ 4984 reg = FDI_RX_IMR(pipe); 4985 temp = I915_READ(reg); 4986 temp &= ~FDI_RX_SYMBOL_LOCK; 4987 temp &= ~FDI_RX_BIT_LOCK; 4988 I915_WRITE(reg, temp); 4989 4990 POSTING_READ(reg); 4991 udelay(150); 4992 4993 /* enable CPU FDI TX and PCH FDI RX */ 4994 reg = FDI_TX_CTL(pipe); 4995 temp = I915_READ(reg); 4996 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4997 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4998 temp &= ~FDI_LINK_TRAIN_NONE; 4999 temp |= FDI_LINK_TRAIN_PATTERN_1; 5000 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 5001 /* SNB-B */ 5002 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 5003 I915_WRITE(reg, temp | FDI_TX_ENABLE); 5004 5005 I915_WRITE(FDI_RX_MISC(pipe), 5006 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 5007 5008 reg = FDI_RX_CTL(pipe); 5009 temp = I915_READ(reg); 5010 if (HAS_PCH_CPT(dev_priv)) { 5011 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 5012 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 5013 } else { 5014 temp &= ~FDI_LINK_TRAIN_NONE; 5015 temp |= FDI_LINK_TRAIN_PATTERN_1; 5016 } 5017 I915_WRITE(reg, temp | FDI_RX_ENABLE); 5018 5019 POSTING_READ(reg); 5020 udelay(150); 5021 5022 for (i = 0; i < 4; i++) { 5023 reg = FDI_TX_CTL(pipe); 5024 temp = I915_READ(reg); 5025 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 5026 temp |= snb_b_fdi_train_param[i]; 5027 I915_WRITE(reg, temp); 5028 5029 POSTING_READ(reg); 5030 udelay(500); 5031 5032 for (retry = 0; retry < 5; retry++) { 5033 reg = FDI_RX_IIR(pipe); 5034 temp = I915_READ(reg); 5035 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 5036 if (temp & FDI_RX_BIT_LOCK) { 5037 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 5038 DRM_DEBUG_KMS("FDI train 1 done.\n"); 5039 break; 5040 } 5041 udelay(50); 5042 } 5043 if (retry < 5) 5044 break; 5045 } 5046 if (i == 4) 5047 DRM_ERROR("FDI train 1 fail!\n"); 5048 5049 /* Train 2 */ 5050 reg = FDI_TX_CTL(pipe); 5051 temp = I915_READ(reg); 5052 temp &= ~FDI_LINK_TRAIN_NONE; 5053 temp |= FDI_LINK_TRAIN_PATTERN_2; 5054 if (IS_GEN(dev_priv, 6)) { 5055 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 5056 /* SNB-B */ 5057 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 5058 } 5059 I915_WRITE(reg, temp); 5060 5061 reg = FDI_RX_CTL(pipe); 5062 temp = I915_READ(reg); 5063 if (HAS_PCH_CPT(dev_priv)) { 5064 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 5065 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 5066 } else { 5067 temp &= ~FDI_LINK_TRAIN_NONE; 5068 temp |= FDI_LINK_TRAIN_PATTERN_2; 5069 } 5070 I915_WRITE(reg, temp); 5071 5072 POSTING_READ(reg); 5073 udelay(150); 5074 5075 for (i = 0; i < 4; i++) { 5076 reg = FDI_TX_CTL(pipe); 5077 temp = I915_READ(reg); 5078 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 5079 temp |= snb_b_fdi_train_param[i]; 5080 I915_WRITE(reg, temp); 5081 5082 POSTING_READ(reg); 5083 udelay(500); 5084 5085 for (retry = 0; retry < 5; retry++) { 5086 reg = FDI_RX_IIR(pipe); 5087 temp = I915_READ(reg); 5088 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 5089 if (temp & FDI_RX_SYMBOL_LOCK) { 5090 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 5091 DRM_DEBUG_KMS("FDI train 2 done.\n"); 5092 break; 5093 } 5094 udelay(50); 5095 } 5096 if (retry < 5) 5097 break; 5098 } 5099 if (i == 4) 5100 DRM_ERROR("FDI train 2 fail!\n"); 5101 5102 DRM_DEBUG_KMS("FDI train done.\n"); 5103 }
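/*
 * Both training phases above walk snb_b_fdi_train_param[] in order,
 * giving each vswing/pre-emphasis level up to five 50us polls of
 * FDI_RX_IIR before stepping to the next level; training only fails
 * once all four levels are exhausted.
 */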
5104 5105 /* Manual link training for Ivy Bridge A0 parts */ 5106 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, 5107 const struct intel_crtc_state *crtc_state) 5108 { 5109 struct drm_device *dev = crtc->base.dev; 5110 struct drm_i915_private *dev_priv = to_i915(dev); 5111 enum pipe pipe = crtc->pipe; 5112 i915_reg_t reg; 5113 u32 temp, i, j; 5114 5115 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit 5116 for train result */ 5117 reg = FDI_RX_IMR(pipe); 5118 temp = I915_READ(reg); 5119 temp &= ~FDI_RX_SYMBOL_LOCK; 5120 temp &= ~FDI_RX_BIT_LOCK; 5121 I915_WRITE(reg, temp); 5122 5123 POSTING_READ(reg); 5124 udelay(150); 5125 5126 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 5127 I915_READ(FDI_RX_IIR(pipe))); 5128 5129 /* Try each vswing and preemphasis setting twice before moving on */ 5130 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { 5131 /* disable first in case we need to retry */ 5132 reg = FDI_TX_CTL(pipe); 5133 temp = I915_READ(reg); 5134 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 5135 temp &= ~FDI_TX_ENABLE; 5136 I915_WRITE(reg, temp); 5137 5138 reg = FDI_RX_CTL(pipe); 5139 temp = I915_READ(reg); 5140 temp &= ~FDI_LINK_TRAIN_AUTO; 5141 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 5142 temp &= ~FDI_RX_ENABLE; 5143 I915_WRITE(reg, temp); 5144 5145 /* enable CPU FDI TX and PCH FDI RX */ 5146 reg = FDI_TX_CTL(pipe); 5147 temp = I915_READ(reg); 5148 temp &= ~FDI_DP_PORT_WIDTH_MASK; 5149 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 5150 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 5151 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 5152 temp |= snb_b_fdi_train_param[j/2]; 5153 temp |= FDI_COMPOSITE_SYNC; 5154 I915_WRITE(reg, temp | FDI_TX_ENABLE); 5155 5156 I915_WRITE(FDI_RX_MISC(pipe), 5157 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 5158 5159 reg = FDI_RX_CTL(pipe); 5160 temp = I915_READ(reg); 5161 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 5162 temp |= FDI_COMPOSITE_SYNC; 5163 I915_WRITE(reg, temp | FDI_RX_ENABLE); 5164 5165 POSTING_READ(reg); 5166 udelay(1); /* should be 0.5us */ 5167 5168 for (i = 0; i < 4; i++) { 5169 reg = FDI_RX_IIR(pipe); 5170 temp = I915_READ(reg); 5171 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 5172 5173 if (temp & FDI_RX_BIT_LOCK || 5174 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 5175 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 5176 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", 5177 i); 5178 break; 5179 } 5180 udelay(1); /* should be 0.5us */ 5181 } 5182 if (i == 4) { 5183 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); 5184 continue; 5185 } 5186 5187 /* Train 2 */ 5188 reg = FDI_TX_CTL(pipe); 5189 temp = I915_READ(reg); 5190 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 5191 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 5192 I915_WRITE(reg, temp); 5193 5194 reg = FDI_RX_CTL(pipe); 5195 temp = I915_READ(reg); 5196 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 5197 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 5198 I915_WRITE(reg, temp); 5199 5200 POSTING_READ(reg); 5201 udelay(2); /* should be 1.5us */ 5202 5203 for (i = 0; i < 4; i++) { 5204 reg = FDI_RX_IIR(pipe); 5205 temp = I915_READ(reg); 5206 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 5207 5208 if (temp & FDI_RX_SYMBOL_LOCK || 5209 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { 5210 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 5211 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", 5212 i); 5213 goto train_done; 5214 } 5215 udelay(2); /* should be 1.5us */ 5216 } 5217 if (i == 4) 5218 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); 5219 } 5220 5221 train_done: 5222 DRM_DEBUG_KMS("FDI
train done.\n"); 5223 } 5224 5225 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) 5226 { 5227 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 5228 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 5229 enum pipe pipe = intel_crtc->pipe; 5230 i915_reg_t reg; 5231 u32 temp; 5232 5233 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 5234 reg = FDI_RX_CTL(pipe); 5235 temp = I915_READ(reg); 5236 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 5237 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 5238 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 5239 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 5240 5241 POSTING_READ(reg); 5242 udelay(200); 5243 5244 /* Switch from Rawclk to PCDclk */ 5245 temp = I915_READ(reg); 5246 I915_WRITE(reg, temp | FDI_PCDCLK); 5247 5248 POSTING_READ(reg); 5249 udelay(200); 5250 5251 /* Enable CPU FDI TX PLL, always on for Ironlake */ 5252 reg = FDI_TX_CTL(pipe); 5253 temp = I915_READ(reg); 5254 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 5255 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 5256 5257 POSTING_READ(reg); 5258 udelay(100); 5259 } 5260 } 5261 5262 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 5263 { 5264 struct drm_device *dev = intel_crtc->base.dev; 5265 struct drm_i915_private *dev_priv = to_i915(dev); 5266 enum pipe pipe = intel_crtc->pipe; 5267 i915_reg_t reg; 5268 u32 temp; 5269 5270 /* Switch from PCDclk to Rawclk */ 5271 reg = FDI_RX_CTL(pipe); 5272 temp = I915_READ(reg); 5273 I915_WRITE(reg, temp & ~FDI_PCDCLK); 5274 5275 /* Disable CPU FDI TX PLL */ 5276 reg = FDI_TX_CTL(pipe); 5277 temp = I915_READ(reg); 5278 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 5279 5280 POSTING_READ(reg); 5281 udelay(100); 5282 5283 reg = FDI_RX_CTL(pipe); 5284 temp = I915_READ(reg); 5285 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 5286 5287 /* Wait for the clocks to turn off. 
*/ 5288 POSTING_READ(reg); 5289 udelay(100); 5290 } 5291 5292 static void ironlake_fdi_disable(struct intel_crtc *crtc) 5293 { 5294 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5295 enum pipe pipe = crtc->pipe; 5296 i915_reg_t reg; 5297 u32 temp; 5298 5299 /* disable CPU FDI tx and PCH FDI rx */ 5300 reg = FDI_TX_CTL(pipe); 5301 temp = I915_READ(reg); 5302 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 5303 POSTING_READ(reg); 5304 5305 reg = FDI_RX_CTL(pipe); 5306 temp = I915_READ(reg); 5307 temp &= ~(0x7 << 16); 5308 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 5309 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 5310 5311 POSTING_READ(reg); 5312 udelay(100); 5313 5314 /* Ironlake workaround, disable clock pointer after downing FDI */ 5315 if (HAS_PCH_IBX(dev_priv)) 5316 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 5317 5318 /* still set train pattern 1 */ 5319 reg = FDI_TX_CTL(pipe); 5320 temp = I915_READ(reg); 5321 temp &= ~FDI_LINK_TRAIN_NONE; 5322 temp |= FDI_LINK_TRAIN_PATTERN_1; 5323 I915_WRITE(reg, temp); 5324 5325 reg = FDI_RX_CTL(pipe); 5326 temp = I915_READ(reg); 5327 if (HAS_PCH_CPT(dev_priv)) { 5328 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 5329 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 5330 } else { 5331 temp &= ~FDI_LINK_TRAIN_NONE; 5332 temp |= FDI_LINK_TRAIN_PATTERN_1; 5333 } 5334 /* BPC in FDI rx is consistent with that in PIPECONF */ 5335 temp &= ~(0x07 << 16); 5336 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 5337 I915_WRITE(reg, temp); 5338 5339 POSTING_READ(reg); 5340 udelay(100); 5341 } 5342 5343 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 5344 { 5345 struct drm_crtc *crtc; 5346 bool cleanup_done; 5347 5348 drm_for_each_crtc(crtc, &dev_priv->drm) { 5349 struct drm_crtc_commit *commit; 5350 spin_lock(&crtc->commit_lock); 5351 commit = list_first_entry_or_null(&crtc->commit_list, 5352 struct drm_crtc_commit, commit_entry); 5353 cleanup_done = commit ? 5354 try_wait_for_completion(&commit->cleanup_done) : true; 5355 spin_unlock(&crtc->commit_lock); 5356 5357 if (cleanup_done) 5358 continue; 5359 5360 drm_crtc_wait_one_vblank(crtc); 5361 5362 return true; 5363 } 5364 5365 return false; 5366 } 5367 5368 void lpt_disable_iclkip(struct drm_i915_private *dev_priv) 5369 { 5370 u32 temp; 5371 5372 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); 5373 5374 mutex_lock(&dev_priv->sb_lock); 5375 5376 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5377 temp |= SBI_SSCCTL_DISABLE; 5378 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 5379 5380 mutex_unlock(&dev_priv->sb_lock); 5381 } 5382 5383 /* Program iCLKIP clock to the desired frequency */ 5384 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) 5385 { 5386 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5387 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5388 int clock = crtc_state->hw.adjusted_mode.crtc_clock; 5389 u32 divsel, phaseinc, auxdiv, phasedir = 0; 5390 u32 temp; 5391 5392 lpt_disable_iclkip(dev_priv); 5393 5394 /* The iCLK virtual clock root frequency is in MHz, 5395 * but the adjusted_mode->crtc_clock is in kHz. To get the 5396 * divisors, it is necessary to divide one by the other, so we 5397 * convert the virtual clock precision to kHz here for higher 5398 * precision.
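*
* A worked example with illustrative numbers: for crtc_clock =
* 108000 kHz and auxdiv = 0, desired_divisor =
* DIV_ROUND_CLOSEST(172800 * 1000, 108000) = 1600, giving
* divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0.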
5399 */ 5400 for (auxdiv = 0; auxdiv < 2; auxdiv++) { 5401 u32 iclk_virtual_root_freq = 172800 * 1000; 5402 u32 iclk_pi_range = 64; 5403 u32 desired_divisor; 5404 5405 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5406 clock << auxdiv); 5407 divsel = (desired_divisor / iclk_pi_range) - 2; 5408 phaseinc = desired_divisor % iclk_pi_range; 5409 5410 /* 5411 * Near 20MHz is a corner case which is 5412 * out of range for the 7-bit divisor 5413 */ 5414 if (divsel <= 0x7f) 5415 break; 5416 } 5417 5418 /* This should not happen with any sane values */ 5419 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 5420 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 5421 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & 5422 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 5423 5424 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 5425 clock, 5426 auxdiv, 5427 divsel, 5428 phasedir, 5429 phaseinc); 5430 5431 mutex_lock(&dev_priv->sb_lock); 5432 5433 /* Program SSCDIVINTPHASE6 */ 5434 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5435 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 5436 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 5437 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 5438 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 5439 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 5440 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 5441 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 5442 5443 /* Program SSCAUXDIV */ 5444 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5445 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 5446 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 5447 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 5448 5449 /* Enable modulator and associated divider */ 5450 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5451 temp &= ~SBI_SSCCTL_DISABLE; 5452 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 5453 5454 mutex_unlock(&dev_priv->sb_lock); 5455 5456 /* Wait for initialization time */ 5457 udelay(24); 5458 5459 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); 5460 } 5461 5462 int lpt_get_iclkip(struct drm_i915_private *dev_priv) 5463 { 5464 u32 divsel, phaseinc, auxdiv; 5465 u32 iclk_virtual_root_freq = 172800 * 1000; 5466 u32 iclk_pi_range = 64; 5467 u32 desired_divisor; 5468 u32 temp; 5469 5470 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) 5471 return 0; 5472 5473 mutex_lock(&dev_priv->sb_lock); 5474 5475 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5476 if (temp & SBI_SSCCTL_DISABLE) { 5477 mutex_unlock(&dev_priv->sb_lock); 5478 return 0; 5479 } 5480 5481 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5482 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> 5483 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; 5484 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> 5485 SBI_SSCDIVINTPHASE_INCVAL_SHIFT; 5486 5487 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5488 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> 5489 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; 5490 5491 mutex_unlock(&dev_priv->sb_lock); 5492 5493 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; 5494 5495 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5496 desired_divisor << auxdiv); 5497 } 5498 5499 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, 5500 enum pipe pch_transcoder) 5501 { 5502 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5503 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5504 enum transcoder cpu_transcoder = 
crtc_state->cpu_transcoder; 5505 5506 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 5507 I915_READ(HTOTAL(cpu_transcoder))); 5508 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder), 5509 I915_READ(HBLANK(cpu_transcoder))); 5510 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder), 5511 I915_READ(HSYNC(cpu_transcoder))); 5512 5513 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder), 5514 I915_READ(VTOTAL(cpu_transcoder))); 5515 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder), 5516 I915_READ(VBLANK(cpu_transcoder))); 5517 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder), 5518 I915_READ(VSYNC(cpu_transcoder))); 5519 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder), 5520 I915_READ(VSYNCSHIFT(cpu_transcoder))); 5521 } 5522 5523 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) 5524 { 5525 u32 temp; 5526 5527 temp = I915_READ(SOUTH_CHICKEN1); 5528 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 5529 return; 5530 5531 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 5532 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 5533 5534 temp &= ~FDI_BC_BIFURCATION_SELECT; 5535 if (enable) 5536 temp |= FDI_BC_BIFURCATION_SELECT; 5537 5538 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); 5539 I915_WRITE(SOUTH_CHICKEN1, temp); 5540 POSTING_READ(SOUTH_CHICKEN1); 5541 } 5542 5543 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) 5544 { 5545 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5546 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5547 5548 switch (crtc->pipe) { 5549 case PIPE_A: 5550 break; 5551 case PIPE_B: 5552 if (crtc_state->fdi_lanes > 2) 5553 cpt_set_fdi_bc_bifurcation(dev_priv, false); 5554 else 5555 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5556 5557 break; 5558 case PIPE_C: 5559 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5560 5561 break; 5562 default: 5563 BUG(); 5564 } 5565 } 5566 5567 /* 5568 * Finds the encoder associated with the given CRTC. This can only be 5569 * used when we know that the CRTC isn't feeding multiple encoders! 
5570 */ 5571 static struct intel_encoder * 5572 intel_get_crtc_new_encoder(const struct intel_atomic_state *state, 5573 const struct intel_crtc_state *crtc_state) 5574 { 5575 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5576 const struct drm_connector_state *connector_state; 5577 const struct drm_connector *connector; 5578 struct intel_encoder *encoder = NULL; 5579 int num_encoders = 0; 5580 int i; 5581 5582 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 5583 if (connector_state->crtc != &crtc->base) 5584 continue; 5585 5586 encoder = to_intel_encoder(connector_state->best_encoder); 5587 num_encoders++; 5588 } 5589 5590 WARN(num_encoders != 1, "%d encoders for pipe %c\n", 5591 num_encoders, pipe_name(crtc->pipe)); 5592 5593 return encoder; 5594 } 5595 5596 /* 5597 * Enable PCH resources required for PCH ports: 5598 * - PCH PLLs 5599 * - FDI training & RX/TX 5600 * - update transcoder timings 5601 * - DP transcoding bits 5602 * - transcoder 5603 */ 5604 static void ironlake_pch_enable(const struct intel_atomic_state *state, 5605 const struct intel_crtc_state *crtc_state) 5606 { 5607 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5608 struct drm_device *dev = crtc->base.dev; 5609 struct drm_i915_private *dev_priv = to_i915(dev); 5610 enum pipe pipe = crtc->pipe; 5611 u32 temp; 5612 5613 assert_pch_transcoder_disabled(dev_priv, pipe); 5614 5615 if (IS_IVYBRIDGE(dev_priv)) 5616 ivybridge_update_fdi_bc_bifurcation(crtc_state); 5617 5618 /* Write the TU size bits before fdi link training, so that error 5619 * detection works. */ 5620 I915_WRITE(FDI_RX_TUSIZE1(pipe), 5621 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 5622 5623 /* For PCH output, training FDI link */ 5624 dev_priv->display.fdi_link_train(crtc, crtc_state); 5625 5626 /* We need to program the right clock selection before writing the pixel 5627 * multiplier into the DPLL. */ 5628 if (HAS_PCH_CPT(dev_priv)) { 5629 u32 sel; 5630 5631 temp = I915_READ(PCH_DPLL_SEL); 5632 temp |= TRANS_DPLL_ENABLE(pipe); 5633 sel = TRANS_DPLLB_SEL(pipe); 5634 if (crtc_state->shared_dpll == 5635 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 5636 temp |= sel; 5637 else 5638 temp &= ~sel; 5639 I915_WRITE(PCH_DPLL_SEL, temp); 5640 } 5641 5642 /* XXX: PCH PLLs can be enabled any time before we enable the PCH 5643 * transcoder, and we actually should do this to not upset any PCH 5644 * transcoder that already uses the clock when we share it. 5645 * 5646 * Note that enable_shared_dpll tries to do the right thing, but 5647 * get_shared_dpll unconditionally resets the pll - we need that to have 5648 * the right LVDS enable sequence.
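 *
 * (intel_enable_shared_dpll() below refcounts the PLL per CRTC, so a
 * PLL already running for another pipe should be left undisturbed.)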
*/ 5649 intel_enable_shared_dpll(crtc_state); 5650 5651 /* set transcoder timing, panel must allow it */ 5652 assert_panel_unlocked(dev_priv, pipe); 5653 ironlake_pch_transcoder_set_timings(crtc_state, pipe); 5654 5655 intel_fdi_normal_train(crtc); 5656 5657 /* For PCH DP, enable TRANS_DP_CTL */ 5658 if (HAS_PCH_CPT(dev_priv) && 5659 intel_crtc_has_dp_encoder(crtc_state)) { 5660 const struct drm_display_mode *adjusted_mode = 5661 &crtc_state->hw.adjusted_mode; 5662 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 5663 i915_reg_t reg = TRANS_DP_CTL(pipe); 5664 enum port port; 5665 5666 temp = I915_READ(reg); 5667 temp &= ~(TRANS_DP_PORT_SEL_MASK | 5668 TRANS_DP_SYNC_MASK | 5669 TRANS_DP_BPC_MASK); 5670 temp |= TRANS_DP_OUTPUT_ENABLE; 5671 temp |= bpc << 9; /* same format but at 11:9 */ 5672 5673 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 5674 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 5675 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 5676 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 5677 5678 port = intel_get_crtc_new_encoder(state, crtc_state)->port; 5679 WARN_ON(port < PORT_B || port > PORT_D); 5680 temp |= TRANS_DP_PORT_SEL(port); 5681 5682 I915_WRITE(reg, temp); 5683 } 5684 5685 ironlake_enable_pch_transcoder(crtc_state); 5686 } 5687 5688 static void lpt_pch_enable(const struct intel_atomic_state *state, 5689 const struct intel_crtc_state *crtc_state) 5690 { 5691 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5692 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5693 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 5694 5695 assert_pch_transcoder_disabled(dev_priv, PIPE_A); 5696 5697 lpt_program_iclkip(crtc_state); 5698 5699 /* Set transcoder timing. */ 5700 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A); 5701 5702 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 5703 } 5704 5705 static void cpt_verify_modeset(struct drm_i915_private *dev_priv, 5706 enum pipe pipe) 5707 { 5708 i915_reg_t dslreg = PIPEDSL(pipe); 5709 u32 temp; 5710 5711 temp = I915_READ(dslreg); 5712 udelay(500); 5713 if (wait_for(I915_READ(dslreg) != temp, 5)) { 5714 if (wait_for(I915_READ(dslreg) != temp, 5)) 5715 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 5716 } 5717 } 5718 5719 /* 5720 * The hardware phase 0.0 refers to the center of the pixel. 5721 * We want to start from the top/left edge which is phase 5722 * -0.5. That matches how the hardware calculates the scaling 5723 * factors (from top-left of the first pixel to bottom-right 5724 * of the last pixel, as opposed to the pixel centers). 5725 * 5726 * For 4:2:0 subsampled chroma planes we obviously have to 5727 * adjust that so that the chroma sample position lands in 5728 * the right spot. 5729 * 5730 * Note that for packed YCbCr 4:2:2 formats there is no way to 5731 * control chroma siting. The hardware simply replicates the 5732 * chroma samples for both of the luma samples, and thus we don't 5733 * actually get the expected MPEG2 chroma siting convention :( 5734 * The same behaviour is observed on pre-SKL platforms as well. 
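 *
 * In the .16 fixed point used below this works out to
 * phase = -0.5 + scale / (2 * sub) (+ (sub - 1) / (2 * sub) if cosited)
 * e.g. for luma (sub == 1), 1:4 upscaling (scale == 0x4000) gives
 * -0.5 + 0.125 = -0.375 and 4:1 downscaling (scale == 0x40000) gives
 * -0.5 + 2.0 = 1.5, matching the initial phases in the diagrams below.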
5735 * 5736 * Theory behind the formula (note that we ignore sub-pixel 5737 * source coordinates): 5738 * s = source sample position 5739 * d = destination sample position 5740 * 5741 * Downscaling 4:1: 5742 * -0.5 5743 * | 0.0 5744 * | | 1.5 (initial phase) 5745 * | | | 5746 * v v v 5747 * | s | s | s | s | 5748 * | d | 5749 * 5750 * Upscaling 1:4: 5751 * -0.5 5752 * | -0.375 (initial phase) 5753 * | | 0.0 5754 * | | | 5755 * v v v 5756 * | s | 5757 * | d | d | d | d | 5758 */ 5759 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited) 5760 { 5761 int phase = -0x8000; 5762 u16 trip = 0; 5763 5764 if (chroma_cosited) 5765 phase += (sub - 1) * 0x8000 / sub; 5766 5767 phase += scale / (2 * sub); 5768 5769 /* 5770 * Hardware initial phase limited to [-0.5:1.5]. 5771 * Since the max hardware scale factor is 3.0, we 5772 * should never actually exceed 1.0 here. 5773 */ 5774 WARN_ON(phase < -0x8000 || phase > 0x18000); 5775 5776 if (phase < 0) 5777 phase = 0x10000 + phase; 5778 else 5779 trip = PS_PHASE_TRIP; 5780 5781 return ((phase >> 2) & PS_PHASE_MASK) | trip; 5782 } 5783 5784 #define SKL_MIN_SRC_W 8 5785 #define SKL_MAX_SRC_W 4096 5786 #define SKL_MIN_SRC_H 8 5787 #define SKL_MAX_SRC_H 4096 5788 #define SKL_MIN_DST_W 8 5789 #define SKL_MAX_DST_W 4096 5790 #define SKL_MIN_DST_H 8 5791 #define SKL_MAX_DST_H 4096 5792 #define ICL_MAX_SRC_W 5120 5793 #define ICL_MAX_SRC_H 4096 5794 #define ICL_MAX_DST_W 5120 5795 #define ICL_MAX_DST_H 4096 5796 #define SKL_MIN_YUV_420_SRC_W 16 5797 #define SKL_MIN_YUV_420_SRC_H 16 5798 5799 static int 5800 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 5801 unsigned int scaler_user, int *scaler_id, 5802 int src_w, int src_h, int dst_w, int dst_h, 5803 const struct drm_format_info *format, 5804 u64 modifier, bool need_scaler) 5805 { 5806 struct intel_crtc_scaler_state *scaler_state = 5807 &crtc_state->scaler_state; 5808 struct intel_crtc *intel_crtc = 5809 to_intel_crtc(crtc_state->uapi.crtc); 5810 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 5811 const struct drm_display_mode *adjusted_mode = 5812 &crtc_state->hw.adjusted_mode; 5813 5814 /* 5815 * Src coordinates are already rotated by 270 degrees for 5816 * the 90/270 degree plane rotation cases (to match the 5817 * GTT mapping), hence no need to account for rotation here. 5818 */ 5819 if (src_w != dst_w || src_h != dst_h) 5820 need_scaler = true; 5821 5822 /* 5823 * Scaling/fitting not supported in IF-ID mode in GEN9+ 5824 * TODO: Interlace fetch mode doesn't support YUV420 planar formats. 5825 * Once NV12 is enabled, handle it here while allocating scaler 5826 * for NV12. 5827 */ 5828 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable && 5829 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 5830 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); 5831 return -EINVAL; 5832 } 5833 5834 /* 5835 * if plane is being disabled or scaler is no longer required or force detach 5836 * - free scaler bound to this plane/crtc 5837 * - in order to do this, update crtc->scaler_usage 5838 * 5839 * Here scaler state in crtc_state is set free so that 5840 * scaler can be assigned to another user. Actual register 5841 * update to free the scaler is done in plane/panel-fit programming. 5842 * For this purpose crtc/plane_state->scaler_id isn't reset here.
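 *
 * (scaler_users is a bitmask: each plane sets the bit of its plane
 * index, while the pipe scaler uses bit SKL_CRTC_INDEX - see
 * skl_update_scaler_crtc() and skl_update_scaler_plane() below.)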
5843 */ 5844 if (force_detach || !need_scaler) { 5845 if (*scaler_id >= 0) { 5846 scaler_state->scaler_users &= ~(1 << scaler_user); 5847 scaler_state->scalers[*scaler_id].in_use = 0; 5848 5849 DRM_DEBUG_KMS("scaler_user index %u.%u: " 5850 "Staged freeing scaler id %d scaler_users = 0x%x\n", 5851 intel_crtc->pipe, scaler_user, *scaler_id, 5852 scaler_state->scaler_users); 5853 *scaler_id = -1; 5854 } 5855 return 0; 5856 } 5857 5858 if (format && intel_format_info_is_yuv_semiplanar(format, modifier) && 5859 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { 5860 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); 5861 return -EINVAL; 5862 } 5863 5864 /* range checks */ 5865 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 5866 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 5867 (INTEL_GEN(dev_priv) >= 11 && 5868 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || 5869 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || 5870 (INTEL_GEN(dev_priv) < 11 && 5871 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 5872 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { 5873 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " 5874 "size is out of scaler range\n", 5875 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); 5876 return -EINVAL; 5877 } 5878 5879 /* mark this plane as a scaler user in crtc_state */ 5880 scaler_state->scaler_users |= (1 << scaler_user); 5881 DRM_DEBUG_KMS("scaler_user index %u.%u: " 5882 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", 5883 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, 5884 scaler_state->scaler_users); 5885 5886 return 0; 5887 } 5888 5889 /** 5890 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc. 5891 * 5892 * @state: crtc state 5893 * 5894 * Return 5895 * 0 - scaler_usage updated successfully 5896 * error - requested scaling cannot be supported or other error condition 5897 */ 5898 int skl_update_scaler_crtc(struct intel_crtc_state *state) 5899 { 5900 const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode; 5901 bool need_scaler = false; 5902 5903 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 5904 need_scaler = true; 5905 5906 return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX, 5907 &state->scaler_state.scaler_id, 5908 state->pipe_src_w, state->pipe_src_h, 5909 adjusted_mode->crtc_hdisplay, 5910 adjusted_mode->crtc_vdisplay, NULL, 0, 5911 need_scaler); 5912 } 5913 5914 /** 5915 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 5916 * @crtc_state: crtc state 5917 * @plane_state: atomic plane state to update 5918 * 5919 * Return 5920 * 0 - scaler_usage updated successfully 5921 * error - requested scaling cannot be supported or other error condition 5922 */ 5923 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 5924 struct intel_plane_state *plane_state) 5925 { 5926 struct intel_plane *intel_plane = 5927 to_intel_plane(plane_state->uapi.plane); 5928 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 5929 struct drm_framebuffer *fb = plane_state->hw.fb; 5930 int ret; 5931 bool force_detach = !fb || !plane_state->uapi.visible; 5932 bool need_scaler = false; 5933 5934 /* Pre-gen11 and SDR planes always need a scaler for planar formats.
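 * On gen11+ HDR planes the dedicated chroma upsampler handles the
 * planar formats instead, hence the icl_is_hdr_plane() exclusion below.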
*/ 5935 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && 5936 fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) 5937 need_scaler = true; 5938 5939 ret = skl_update_scaler(crtc_state, force_detach, 5940 drm_plane_index(&intel_plane->base), 5941 &plane_state->scaler_id, 5942 drm_rect_width(&plane_state->uapi.src) >> 16, 5943 drm_rect_height(&plane_state->uapi.src) >> 16, 5944 drm_rect_width(&plane_state->uapi.dst), 5945 drm_rect_height(&plane_state->uapi.dst), 5946 fb ? fb->format : NULL, 5947 fb ? fb->modifier : 0, 5948 need_scaler); 5949 5950 if (ret || plane_state->scaler_id < 0) 5951 return ret; 5952 5953 /* check colorkey */ 5954 if (plane_state->ckey.flags) { 5955 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", 5956 intel_plane->base.base.id, 5957 intel_plane->base.name); 5958 return -EINVAL; 5959 } 5960 5961 /* Check src format */ 5962 switch (fb->format->format) { 5963 case DRM_FORMAT_RGB565: 5964 case DRM_FORMAT_XBGR8888: 5965 case DRM_FORMAT_XRGB8888: 5966 case DRM_FORMAT_ABGR8888: 5967 case DRM_FORMAT_ARGB8888: 5968 case DRM_FORMAT_XRGB2101010: 5969 case DRM_FORMAT_XBGR2101010: 5970 case DRM_FORMAT_ARGB2101010: 5971 case DRM_FORMAT_ABGR2101010: 5972 case DRM_FORMAT_YUYV: 5973 case DRM_FORMAT_YVYU: 5974 case DRM_FORMAT_UYVY: 5975 case DRM_FORMAT_VYUY: 5976 case DRM_FORMAT_NV12: 5977 case DRM_FORMAT_P010: 5978 case DRM_FORMAT_P012: 5979 case DRM_FORMAT_P016: 5980 case DRM_FORMAT_Y210: 5981 case DRM_FORMAT_Y212: 5982 case DRM_FORMAT_Y216: 5983 case DRM_FORMAT_XVYU2101010: 5984 case DRM_FORMAT_XVYU12_16161616: 5985 case DRM_FORMAT_XVYU16161616: 5986 break; 5987 case DRM_FORMAT_XBGR16161616F: 5988 case DRM_FORMAT_ABGR16161616F: 5989 case DRM_FORMAT_XRGB16161616F: 5990 case DRM_FORMAT_ARGB16161616F: 5991 if (INTEL_GEN(dev_priv) >= 11) 5992 break; 5993 /* fall through */ 5994 default: 5995 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", 5996 intel_plane->base.base.id, intel_plane->base.name, 5997 fb->base.id, fb->format->format); 5998 return -EINVAL; 5999 } 6000 6001 return 0; 6002 } 6003 6004 void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state) 6005 { 6006 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 6007 int i; 6008 6009 for (i = 0; i < crtc->num_scalers; i++) 6010 skl_detach_scaler(crtc, i); 6011 } 6012 6013 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) 6014 { 6015 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6016 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6017 enum pipe pipe = crtc->pipe; 6018 const struct intel_crtc_scaler_state *scaler_state = 6019 &crtc_state->scaler_state; 6020 6021 if (crtc_state->pch_pfit.enabled) { 6022 u16 uv_rgb_hphase, uv_rgb_vphase; 6023 int pfit_w, pfit_h, hscale, vscale; 6024 int id; 6025 6026 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0)) 6027 return; 6028 6029 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF; 6030 pfit_h = crtc_state->pch_pfit.size & 0xFFFF; 6031 6032 hscale = (crtc_state->pipe_src_w << 16) / pfit_w; 6033 vscale = (crtc_state->pipe_src_h << 16) / pfit_h; 6034 6035 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); 6036 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); 6037 6038 id = scaler_state->scaler_id; 6039 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 6040 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 6041 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id), 6042 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); 6043 
I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), 6044 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); 6045 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos); 6046 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size); 6047 } 6048 } 6049 6050 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state) 6051 { 6052 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6053 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6054 enum pipe pipe = crtc->pipe; 6055 6056 if (crtc_state->pch_pfit.enabled) { 6057 /* Force use of hard-coded filter coefficients 6058 * as some pre-programmed values are broken, 6059 * e.g. x201. 6060 */ 6061 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 6062 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 6063 PF_PIPE_SEL_IVB(pipe)); 6064 else 6065 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 6066 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos); 6067 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size); 6068 } 6069 } 6070 6071 void hsw_enable_ips(const struct intel_crtc_state *crtc_state) 6072 { 6073 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6074 struct drm_device *dev = crtc->base.dev; 6075 struct drm_i915_private *dev_priv = to_i915(dev); 6076 6077 if (!crtc_state->ips_enabled) 6078 return; 6079 6080 /* 6081 * We can only enable IPS after we enable a plane and wait for a vblank 6082 * This function is called from post_plane_update, which is run after 6083 * a vblank wait. 6084 */ 6085 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); 6086 6087 if (IS_BROADWELL(dev_priv)) { 6088 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 6089 IPS_ENABLE | IPS_PCODE_CONTROL)); 6090 /* Quoting Art Runyan: "it's not safe to expect any particular 6091 * value in IPS_CTL bit 31 after enabling IPS through the 6092 * mailbox." Moreover, the mailbox may return a bogus state, 6093 * so we need to just enable it and continue on. 6094 */ 6095 } else { 6096 I915_WRITE(IPS_CTL, IPS_ENABLE); 6097 /* The bit only becomes 1 in the next vblank, so this wait here 6098 * is essentially intel_wait_for_vblank. If we don't have this 6099 * and don't wait for vblanks until the end of crtc_enable, then 6100 * the HW state readout code will complain that the expected 6101 * IPS_CTL value is not the one we read. */ 6102 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50)) 6103 DRM_ERROR("Timed out waiting for IPS enable\n"); 6104 } 6105 } 6106 6107 void hsw_disable_ips(const struct intel_crtc_state *crtc_state) 6108 { 6109 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6110 struct drm_device *dev = crtc->base.dev; 6111 struct drm_i915_private *dev_priv = to_i915(dev); 6112 6113 if (!crtc_state->ips_enabled) 6114 return; 6115 6116 if (IS_BROADWELL(dev_priv)) { 6117 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 6118 /* 6119 * Wait for PCODE to finish disabling IPS. The BSpec specified 6120 * 42ms timeout value leads to occasional timeouts so use 100ms 6121 * instead. 6122 */ 6123 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100)) 6124 DRM_ERROR("Timed out waiting for IPS disable\n"); 6125 } else { 6126 I915_WRITE(IPS_CTL, 0); 6127 POSTING_READ(IPS_CTL); 6128 } 6129 6130 /* We need to wait for a vblank before we can disable the plane.
*/ 6131 intel_wait_for_vblank(dev_priv, crtc->pipe); 6132 } 6133 6134 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 6135 { 6136 if (intel_crtc->overlay) 6137 (void) intel_overlay_switch_off(intel_crtc->overlay); 6138 6139 /* Let userspace switch the overlay on again. In most cases userspace 6140 * has to recompute where to put it anyway. 6141 */ 6142 } 6143 6144 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state, 6145 const struct intel_crtc_state *new_crtc_state) 6146 { 6147 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6148 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6149 6150 if (!old_crtc_state->ips_enabled) 6151 return false; 6152 6153 if (needs_modeset(new_crtc_state)) 6154 return true; 6155 6156 /* 6157 * Workaround : Do not read or write the pipe palette/gamma data while 6158 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 6159 * 6160 * Disable IPS before we program the LUT. 6161 */ 6162 if (IS_HASWELL(dev_priv) && 6163 (new_crtc_state->uapi.color_mgmt_changed || 6164 new_crtc_state->update_pipe) && 6165 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 6166 return true; 6167 6168 return !new_crtc_state->ips_enabled; 6169 } 6170 6171 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state, 6172 const struct intel_crtc_state *new_crtc_state) 6173 { 6174 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6175 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6176 6177 if (!new_crtc_state->ips_enabled) 6178 return false; 6179 6180 if (needs_modeset(new_crtc_state)) 6181 return true; 6182 6183 /* 6184 * Workaround : Do not read or write the pipe palette/gamma data while 6185 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 6186 * 6187 * Re-enable IPS after the LUT has been programmed. 6188 */ 6189 if (IS_HASWELL(dev_priv) && 6190 (new_crtc_state->uapi.color_mgmt_changed || 6191 new_crtc_state->update_pipe) && 6192 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 6193 return true; 6194 6195 /* 6196 * We can't read out IPS on broadwell, assume the worst and 6197 * forcibly enable IPS on the first fastset. 
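 *
 * (I915_MODE_FLAG_INHERITED marks state taken over from the BIOS at
 * the initial hardware readout, so this only triggers on the first
 * fastset after boot.)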
6198 */ 6199 if (new_crtc_state->update_pipe && 6200 old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED) 6201 return true; 6202 6203 return !old_crtc_state->ips_enabled; 6204 } 6205 6206 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state) 6207 { 6208 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 6209 6210 if (!crtc_state->nv12_planes) 6211 return false; 6212 6213 /* WA Display #0827: Gen9:all */ 6214 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) 6215 return true; 6216 6217 return false; 6218 } 6219 6220 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) 6221 { 6222 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 6223 6224 /* Wa_2006604312:icl */ 6225 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv)) 6226 return true; 6227 6228 return false; 6229 } 6230 6231 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, 6232 const struct intel_crtc_state *new_crtc_state) 6233 { 6234 return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) && 6235 new_crtc_state->active_planes; 6236 } 6237 6238 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, 6239 const struct intel_crtc_state *new_crtc_state) 6240 { 6241 return old_crtc_state->active_planes && 6242 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state)); 6243 } 6244 6245 static void intel_post_plane_update(struct intel_atomic_state *state, 6246 struct intel_crtc *crtc) 6247 { 6248 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6249 struct intel_plane *primary = to_intel_plane(crtc->base.primary); 6250 const struct intel_crtc_state *old_crtc_state = 6251 intel_atomic_get_old_crtc_state(state, crtc); 6252 const struct intel_crtc_state *new_crtc_state = 6253 intel_atomic_get_new_crtc_state(state, crtc); 6254 const struct intel_plane_state *new_primary_state = 6255 intel_atomic_get_new_plane_state(state, primary); 6256 enum pipe pipe = crtc->pipe; 6257 6258 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits); 6259 6260 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active) 6261 intel_update_watermarks(crtc); 6262 6263 if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state)) 6264 hsw_enable_ips(new_crtc_state); 6265 6266 if (new_primary_state) 6267 intel_fbc_post_update(crtc); 6268 6269 if (needs_nv12_wa(old_crtc_state) && 6270 !needs_nv12_wa(new_crtc_state)) 6271 skl_wa_827(dev_priv, pipe, false); 6272 6273 if (needs_scalerclk_wa(old_crtc_state) && 6274 !needs_scalerclk_wa(new_crtc_state)) 6275 icl_wa_scalerclkgating(dev_priv, pipe, false); 6276 } 6277 6278 static void intel_pre_plane_update(struct intel_atomic_state *state, 6279 struct intel_crtc *crtc) 6280 { 6281 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6282 struct intel_plane *primary = to_intel_plane(crtc->base.primary); 6283 const struct intel_crtc_state *old_crtc_state = 6284 intel_atomic_get_old_crtc_state(state, crtc); 6285 const struct intel_crtc_state *new_crtc_state = 6286 intel_atomic_get_new_crtc_state(state, crtc); 6287 const struct intel_plane_state *new_primary_state = 6288 intel_atomic_get_new_plane_state(state, primary); 6289 enum pipe pipe = crtc->pipe; 6290 6291 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state)) 6292 hsw_disable_ips(old_crtc_state); 6293 6294 if (new_primary_state && 6295 intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state)) 6296 intel_wait_for_vblank(dev_priv, pipe); 6297 
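/*
 * The two workarounds below are armed here, before the planes are
 * updated, and disarmed again in intel_post_plane_update() once the
 * old state no longer needs them, so they stay active across the
 * plane update itself.
 */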
6298 /* Display WA 827 */ 6299 if (!needs_nv12_wa(old_crtc_state) && 6300 needs_nv12_wa(new_crtc_state)) 6301 skl_wa_827(dev_priv, pipe, true); 6302 6303 /* Wa_2006604312:icl */ 6304 if (!needs_scalerclk_wa(old_crtc_state) && 6305 needs_scalerclk_wa(new_crtc_state)) 6306 icl_wa_scalerclkgating(dev_priv, pipe, true); 6307 6308 /* 6309 * Vblank time updates from the shadow to live plane control register 6310 * are blocked if the memory self-refresh mode is active at that 6311 * moment. So to make sure the plane gets truly disabled, disable 6312 * first the self-refresh mode. The self-refresh enable bit in turn 6313 * will be checked/applied by the HW only at the next frame start 6314 * event which is after the vblank start event, so we need to have a 6315 * wait-for-vblank between disabling the plane and the pipe. 6316 */ 6317 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active && 6318 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 6319 intel_wait_for_vblank(dev_priv, pipe); 6320 6321 /* 6322 * IVB workaround: must disable low power watermarks for at least 6323 * one frame before enabling scaling. LP watermarks can be re-enabled 6324 * when scaling is disabled. 6325 * 6326 * WaCxSRDisabledForSpriteScaling:ivb 6327 */ 6328 if (old_crtc_state->hw.active && 6329 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) 6330 intel_wait_for_vblank(dev_priv, pipe); 6331 6332 /* 6333 * If we're doing a modeset we don't need to do any 6334 * pre-vblank watermark programming here. 6335 */ 6336 if (!needs_modeset(new_crtc_state)) { 6337 /* 6338 * For platforms that support atomic watermarks, program the 6339 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 6340 * will be the intermediate values that are safe for both pre- and 6341 * post- vblank; when vblank happens, the 'active' values will be set 6342 * to the final 'target' values and we'll do this again to get the 6343 * optimal watermarks. For gen9+ platforms, the values we program here 6344 * will be the final target values which will get automatically latched 6345 * at vblank time; no further programming will be necessary. 6346 * 6347 * If a platform hasn't been transitioned to atomic watermarks yet, 6348 * we'll continue to update watermarks the old way, if flags tell 6349 * us to. 6350 */ 6351 if (dev_priv->display.initial_watermarks) 6352 dev_priv->display.initial_watermarks(state, crtc); 6353 else if (new_crtc_state->update_wm_pre) 6354 intel_update_watermarks(crtc); 6355 } 6356 6357 /* 6358 * Gen2 reports pipe underruns whenever all planes are disabled. 6359 * So disable underrun reporting before all the planes get disabled. 6360 * 6361 * We do this after .initial_watermarks() so that we have a 6362 * chance of catching underruns with the intermediate watermarks 6363 * vs. the old plane configuration. 
6364 */ 6365 if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state)) 6366 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6367 } 6368 6369 static void intel_crtc_disable_planes(struct intel_atomic_state *state, 6370 struct intel_crtc *crtc) 6371 { 6372 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6373 const struct intel_crtc_state *new_crtc_state = 6374 intel_atomic_get_new_crtc_state(state, crtc); 6375 unsigned int update_mask = new_crtc_state->update_planes; 6376 const struct intel_plane_state *old_plane_state; 6377 struct intel_plane *plane; 6378 unsigned fb_bits = 0; 6379 int i; 6380 6381 intel_crtc_dpms_overlay_disable(crtc); 6382 6383 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 6384 if (crtc->pipe != plane->pipe || 6385 !(update_mask & BIT(plane->id))) 6386 continue; 6387 6388 intel_disable_plane(plane, new_crtc_state); 6389 6390 if (old_plane_state->uapi.visible) 6391 fb_bits |= plane->frontbuffer_bit; 6392 } 6393 6394 intel_frontbuffer_flip(dev_priv, fb_bits); 6395 } 6396 6397 /* 6398 * intel_connector_primary_encoder - get the primary encoder for a connector 6399 * @connector: connector for which to return the encoder 6400 * 6401 * Returns the primary encoder for a connector. There is a 1:1 mapping from 6402 * all connectors to their encoder, except for DP-MST connectors which have 6403 * both a virtual and a primary encoder. These DP-MST primary encoders can be 6404 * pointed to by as many DP-MST connectors as there are pipes. 6405 */ 6406 static struct intel_encoder * 6407 intel_connector_primary_encoder(struct intel_connector *connector) 6408 { 6409 struct intel_encoder *encoder; 6410 6411 if (connector->mst_port) 6412 return &dp_to_dig_port(connector->mst_port)->base; 6413 6414 encoder = intel_attached_encoder(&connector->base); 6415 WARN_ON(!encoder); 6416 6417 return encoder; 6418 } 6419 6420 static bool 6421 intel_connector_needs_modeset(struct intel_atomic_state *state, 6422 const struct drm_connector_state *old_conn_state, 6423 const struct drm_connector_state *new_conn_state) 6424 { 6425 struct intel_crtc *old_crtc = old_conn_state->crtc ? 6426 to_intel_crtc(old_conn_state->crtc) : NULL; 6427 struct intel_crtc *new_crtc = new_conn_state->crtc ? 6428 to_intel_crtc(new_conn_state->crtc) : NULL; 6429 6430 return new_crtc != old_crtc || 6431 (new_crtc && 6432 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc))); 6433 } 6434 6435 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 6436 { 6437 struct drm_connector_state *old_conn_state; 6438 struct drm_connector_state *new_conn_state; 6439 struct drm_connector *conn; 6440 int i; 6441 6442 for_each_oldnew_connector_in_state(&state->base, conn, 6443 old_conn_state, new_conn_state, i) { 6444 struct intel_encoder *encoder; 6445 struct intel_crtc *crtc; 6446 6447 if (!intel_connector_needs_modeset(state, 6448 old_conn_state, 6449 new_conn_state)) 6450 continue; 6451 6452 encoder = intel_connector_primary_encoder(to_intel_connector(conn)); 6453 if (!encoder->update_prepare) 6454 continue; 6455 6456 crtc = new_conn_state->crtc ? 
6457 to_intel_crtc(new_conn_state->crtc) : NULL; 6458 encoder->update_prepare(state, encoder, crtc); 6459 } 6460 } 6461 6462 static void intel_encoders_update_complete(struct intel_atomic_state *state) 6463 { 6464 struct drm_connector_state *old_conn_state; 6465 struct drm_connector_state *new_conn_state; 6466 struct drm_connector *conn; 6467 int i; 6468 6469 for_each_oldnew_connector_in_state(&state->base, conn, 6470 old_conn_state, new_conn_state, i) { 6471 struct intel_encoder *encoder; 6472 struct intel_crtc *crtc; 6473 6474 if (!intel_connector_needs_modeset(state, 6475 old_conn_state, 6476 new_conn_state)) 6477 continue; 6478 6479 encoder = intel_connector_primary_encoder(to_intel_connector(conn)); 6480 if (!encoder->update_complete) 6481 continue; 6482 6483 crtc = new_conn_state->crtc ? 6484 to_intel_crtc(new_conn_state->crtc) : NULL; 6485 encoder->update_complete(state, encoder, crtc); 6486 } 6487 } 6488 6489 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, 6490 struct intel_crtc *crtc) 6491 { 6492 const struct intel_crtc_state *crtc_state = 6493 intel_atomic_get_new_crtc_state(state, crtc); 6494 const struct drm_connector_state *conn_state; 6495 struct drm_connector *conn; 6496 int i; 6497 6498 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6499 struct intel_encoder *encoder = 6500 to_intel_encoder(conn_state->best_encoder); 6501 6502 if (conn_state->crtc != &crtc->base) 6503 continue; 6504 6505 if (encoder->pre_pll_enable) 6506 encoder->pre_pll_enable(encoder, crtc_state, conn_state); 6507 } 6508 } 6509 6510 static void intel_encoders_pre_enable(struct intel_atomic_state *state, 6511 struct intel_crtc *crtc) 6512 { 6513 const struct intel_crtc_state *crtc_state = 6514 intel_atomic_get_new_crtc_state(state, crtc); 6515 const struct drm_connector_state *conn_state; 6516 struct drm_connector *conn; 6517 int i; 6518 6519 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6520 struct intel_encoder *encoder = 6521 to_intel_encoder(conn_state->best_encoder); 6522 6523 if (conn_state->crtc != &crtc->base) 6524 continue; 6525 6526 if (encoder->pre_enable) 6527 encoder->pre_enable(encoder, crtc_state, conn_state); 6528 } 6529 } 6530 6531 static void intel_encoders_enable(struct intel_atomic_state *state, 6532 struct intel_crtc *crtc) 6533 { 6534 const struct intel_crtc_state *crtc_state = 6535 intel_atomic_get_new_crtc_state(state, crtc); 6536 const struct drm_connector_state *conn_state; 6537 struct drm_connector *conn; 6538 int i; 6539 6540 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6541 struct intel_encoder *encoder = 6542 to_intel_encoder(conn_state->best_encoder); 6543 6544 if (conn_state->crtc != &crtc->base) 6545 continue; 6546 6547 if (encoder->enable) 6548 encoder->enable(encoder, crtc_state, conn_state); 6549 intel_opregion_notify_encoder(encoder, true); 6550 } 6551 } 6552 6553 static void intel_encoders_disable(struct intel_atomic_state *state, 6554 struct intel_crtc *crtc) 6555 { 6556 const struct intel_crtc_state *old_crtc_state = 6557 intel_atomic_get_old_crtc_state(state, crtc); 6558 const struct drm_connector_state *old_conn_state; 6559 struct drm_connector *conn; 6560 int i; 6561 6562 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6563 struct intel_encoder *encoder = 6564 to_intel_encoder(old_conn_state->best_encoder); 6565 6566 if (old_conn_state->crtc != &crtc->base) 6567 continue; 6568 6569 intel_opregion_notify_encoder(encoder, false); 6570 if 
(encoder->disable) 6571 encoder->disable(encoder, old_crtc_state, old_conn_state); 6572 } 6573 } 6574 6575 static void intel_encoders_post_disable(struct intel_atomic_state *state, 6576 struct intel_crtc *crtc) 6577 { 6578 const struct intel_crtc_state *old_crtc_state = 6579 intel_atomic_get_old_crtc_state(state, crtc); 6580 const struct drm_connector_state *old_conn_state; 6581 struct drm_connector *conn; 6582 int i; 6583 6584 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6585 struct intel_encoder *encoder = 6586 to_intel_encoder(old_conn_state->best_encoder); 6587 6588 if (old_conn_state->crtc != &crtc->base) 6589 continue; 6590 6591 if (encoder->post_disable) 6592 encoder->post_disable(encoder, old_crtc_state, old_conn_state); 6593 } 6594 } 6595 6596 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state, 6597 struct intel_crtc *crtc) 6598 { 6599 const struct intel_crtc_state *old_crtc_state = 6600 intel_atomic_get_old_crtc_state(state, crtc); 6601 const struct drm_connector_state *old_conn_state; 6602 struct drm_connector *conn; 6603 int i; 6604 6605 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6606 struct intel_encoder *encoder = 6607 to_intel_encoder(old_conn_state->best_encoder); 6608 6609 if (old_conn_state->crtc != &crtc->base) 6610 continue; 6611 6612 if (encoder->post_pll_disable) 6613 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state); 6614 } 6615 } 6616 6617 static void intel_encoders_update_pipe(struct intel_atomic_state *state, 6618 struct intel_crtc *crtc) 6619 { 6620 const struct intel_crtc_state *crtc_state = 6621 intel_atomic_get_new_crtc_state(state, crtc); 6622 const struct drm_connector_state *conn_state; 6623 struct drm_connector *conn; 6624 int i; 6625 6626 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6627 struct intel_encoder *encoder = 6628 to_intel_encoder(conn_state->best_encoder); 6629 6630 if (conn_state->crtc != &crtc->base) 6631 continue; 6632 6633 if (encoder->update_pipe) 6634 encoder->update_pipe(encoder, crtc_state, conn_state); 6635 } 6636 } 6637 6638 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) 6639 { 6640 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6641 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 6642 6643 plane->disable_plane(plane, crtc_state); 6644 } 6645 6646 static void ironlake_crtc_enable(struct intel_atomic_state *state, 6647 struct intel_crtc *crtc) 6648 { 6649 const struct intel_crtc_state *new_crtc_state = 6650 intel_atomic_get_new_crtc_state(state, crtc); 6651 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6652 enum pipe pipe = crtc->pipe; 6653 6654 if (WARN_ON(crtc->active)) 6655 return; 6656 6657 /* 6658 * Sometimes spurious CPU pipe underruns happen during FDI 6659 * training, at least with VGA+HDMI cloning. Suppress them. 6660 * 6661 * On ILK we get an occasional spurious CPU pipe underruns 6662 * between eDP port A enable and vdd enable. Also PCH port 6663 * enable seems to result in the occasional CPU pipe underrun. 6664 * 6665 * Spurious PCH underruns also occur during PCH enabling. 
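 *
 * Underrun reporting is re-enabled at the very end of this function,
 * after the double vblank wait that follows the PCH enable.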
6666 */ 6667 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6668 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6669 6670 if (new_crtc_state->has_pch_encoder) 6671 intel_prepare_shared_dpll(new_crtc_state); 6672 6673 if (intel_crtc_has_dp_encoder(new_crtc_state)) 6674 intel_dp_set_m_n(new_crtc_state, M1_N1); 6675 6676 intel_set_pipe_timings(new_crtc_state); 6677 intel_set_pipe_src_size(new_crtc_state); 6678 6679 if (new_crtc_state->has_pch_encoder) 6680 intel_cpu_transcoder_set_m_n(new_crtc_state, 6681 &new_crtc_state->fdi_m_n, NULL); 6682 6683 ironlake_set_pipeconf(new_crtc_state); 6684 6685 crtc->active = true; 6686 6687 intel_encoders_pre_enable(state, crtc); 6688 6689 if (new_crtc_state->has_pch_encoder) { 6690 /* Note: FDI PLL enabling _must_ be done before we enable the 6691 * cpu pipes, hence this is separate from all the other fdi/pch 6692 * enabling. */ 6693 ironlake_fdi_pll_enable(new_crtc_state); 6694 } else { 6695 assert_fdi_tx_disabled(dev_priv, pipe); 6696 assert_fdi_rx_disabled(dev_priv, pipe); 6697 } 6698 6699 ironlake_pfit_enable(new_crtc_state); 6700 6701 /* 6702 * On ILK+ LUT must be loaded before the pipe is running but with 6703 * clocks enabled 6704 */ 6705 intel_color_load_luts(new_crtc_state); 6706 intel_color_commit(new_crtc_state); 6707 /* update DSPCNTR to configure gamma for pipe bottom color */ 6708 intel_disable_primary_plane(new_crtc_state); 6709 6710 if (dev_priv->display.initial_watermarks) 6711 dev_priv->display.initial_watermarks(state, crtc); 6712 intel_enable_pipe(new_crtc_state); 6713 6714 if (new_crtc_state->has_pch_encoder) 6715 ironlake_pch_enable(state, new_crtc_state); 6716 6717 intel_crtc_vblank_on(new_crtc_state); 6718 6719 intel_encoders_enable(state, crtc); 6720 6721 if (HAS_PCH_CPT(dev_priv)) 6722 cpt_verify_modeset(dev_priv, pipe); 6723 6724 /* 6725 * Must wait for vblank to avoid spurious PCH FIFO underruns. 6726 * And a second vblank wait is needed at least on ILK with 6727 * some interlaced HDMI modes. Let's do the double wait always 6728 * in case there are more corner cases we don't know about. 6729 */ 6730 if (new_crtc_state->has_pch_encoder) { 6731 intel_wait_for_vblank(dev_priv, pipe); 6732 intel_wait_for_vblank(dev_priv, pipe); 6733 } 6734 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6735 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6736 } 6737 6738 /* IPS only exists on ULT machines and is tied to pipe A. 
*/ 6739 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 6740 { 6741 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; 6742 } 6743 6744 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, 6745 enum pipe pipe, bool apply) 6746 { 6747 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe)); 6748 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 6749 6750 if (apply) 6751 val |= mask; 6752 else 6753 val &= ~mask; 6754 6755 I915_WRITE(CLKGATE_DIS_PSL(pipe), val); 6756 } 6757 6758 static void icl_pipe_mbus_enable(struct intel_crtc *crtc) 6759 { 6760 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6761 enum pipe pipe = crtc->pipe; 6762 u32 val; 6763 6764 val = MBUS_DBOX_A_CREDIT(2); 6765 6766 if (INTEL_GEN(dev_priv) >= 12) { 6767 val |= MBUS_DBOX_BW_CREDIT(2); 6768 val |= MBUS_DBOX_B_CREDIT(12); 6769 } else { 6770 val |= MBUS_DBOX_BW_CREDIT(1); 6771 val |= MBUS_DBOX_B_CREDIT(8); 6772 } 6773 6774 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); 6775 } 6776 6777 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) 6778 { 6779 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6780 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6781 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder); 6782 u32 val; 6783 6784 val = I915_READ(reg); 6785 val &= ~HSW_FRAME_START_DELAY_MASK; 6786 val |= HSW_FRAME_START_DELAY(0); 6787 I915_WRITE(reg, val); 6788 } 6789 6790 static void haswell_crtc_enable(struct intel_atomic_state *state, 6791 struct intel_crtc *crtc) 6792 { 6793 const struct intel_crtc_state *new_crtc_state = 6794 intel_atomic_get_new_crtc_state(state, crtc); 6795 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6796 enum pipe pipe = crtc->pipe, hsw_workaround_pipe; 6797 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 6798 bool psl_clkgate_wa; 6799 6800 if (WARN_ON(crtc->active)) 6801 return; 6802 6803 intel_encoders_pre_pll_enable(state, crtc); 6804 6805 if (new_crtc_state->shared_dpll) 6806 intel_enable_shared_dpll(new_crtc_state); 6807 6808 intel_encoders_pre_enable(state, crtc); 6809 6810 if (intel_crtc_has_dp_encoder(new_crtc_state)) 6811 intel_dp_set_m_n(new_crtc_state, M1_N1); 6812 6813 if (!transcoder_is_dsi(cpu_transcoder)) 6814 intel_set_pipe_timings(new_crtc_state); 6815 6816 if (INTEL_GEN(dev_priv) >= 11) 6817 icl_enable_trans_port_sync(new_crtc_state); 6818 6819 intel_set_pipe_src_size(new_crtc_state); 6820 6821 if (cpu_transcoder != TRANSCODER_EDP && 6822 !transcoder_is_dsi(cpu_transcoder)) 6823 I915_WRITE(PIPE_MULT(cpu_transcoder), 6824 new_crtc_state->pixel_multiplier - 1); 6825 6826 if (new_crtc_state->has_pch_encoder) 6827 intel_cpu_transcoder_set_m_n(new_crtc_state, 6828 &new_crtc_state->fdi_m_n, NULL); 6829 6830 if (!transcoder_is_dsi(cpu_transcoder)) { 6831 hsw_set_frame_start_delay(new_crtc_state); 6832 haswell_set_pipeconf(new_crtc_state); 6833 } 6834 6835 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 6836 bdw_set_pipemisc(new_crtc_state); 6837 6838 crtc->active = true; 6839 6840 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ 6841 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 6842 new_crtc_state->pch_pfit.enabled; 6843 if (psl_clkgate_wa) 6844 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 6845 6846 if (INTEL_GEN(dev_priv) >= 9) 6847 skylake_pfit_enable(new_crtc_state); 6848 else 6849 ironlake_pfit_enable(new_crtc_state); 6850 6851 /* 6852 * On ILK+ LUT must be loaded before 
the pipe is running but with 6853 * clocks enabled 6854 */ 6855 intel_color_load_luts(new_crtc_state); 6856 intel_color_commit(new_crtc_state); 6857 /* update DSPCNTR to configure gamma/csc for pipe bottom color */ 6858 if (INTEL_GEN(dev_priv) < 9) 6859 intel_disable_primary_plane(new_crtc_state); 6860 6861 if (INTEL_GEN(dev_priv) >= 11) 6862 icl_set_pipe_chicken(crtc); 6863 6864 if (!transcoder_is_dsi(cpu_transcoder)) 6865 intel_ddi_enable_transcoder_func(new_crtc_state); 6866 6867 if (dev_priv->display.initial_watermarks) 6868 dev_priv->display.initial_watermarks(state, crtc); 6869 6870 if (INTEL_GEN(dev_priv) >= 11) 6871 icl_pipe_mbus_enable(crtc); 6872 6873 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 6874 if (!transcoder_is_dsi(cpu_transcoder)) 6875 intel_enable_pipe(new_crtc_state); 6876 6877 if (new_crtc_state->has_pch_encoder) 6878 lpt_pch_enable(state, new_crtc_state); 6879 6880 intel_crtc_vblank_on(new_crtc_state); 6881 6882 intel_encoders_enable(state, crtc); 6883 6884 if (psl_clkgate_wa) { 6885 intel_wait_for_vblank(dev_priv, pipe); 6886 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); 6887 } 6888 6889 /* If we change the relative order between pipe/planes enabling, we need 6890 * to change the workaround. */ 6891 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; 6892 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 6893 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 6894 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 6895 } 6896 } 6897 6898 void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) 6899 { 6900 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 6901 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6902 enum pipe pipe = crtc->pipe; 6903 6904 /* To avoid upsetting the power well on haswell only disable the pfit if 6905 * it's in use. The hw state code will make sure we get this right. */ 6906 if (old_crtc_state->pch_pfit.enabled) { 6907 I915_WRITE(PF_CTL(pipe), 0); 6908 I915_WRITE(PF_WIN_POS(pipe), 0); 6909 I915_WRITE(PF_WIN_SZ(pipe), 0); 6910 } 6911 } 6912 6913 static void ironlake_crtc_disable(struct intel_atomic_state *state, 6914 struct intel_crtc *crtc) 6915 { 6916 const struct intel_crtc_state *old_crtc_state = 6917 intel_atomic_get_old_crtc_state(state, crtc); 6918 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6919 enum pipe pipe = crtc->pipe; 6920 6921 /* 6922 * Sometimes spurious CPU pipe underruns happen when the 6923 * pipe is already disabled, but FDI RX/TX is still enabled. 6924 * Happens at least with VGA+HDMI cloning. Suppress them. 
6925 */ 6926 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6927 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6928 6929 intel_encoders_disable(state, crtc); 6930 6931 intel_crtc_vblank_off(old_crtc_state); 6932 6933 intel_disable_pipe(old_crtc_state); 6934 6935 ironlake_pfit_disable(old_crtc_state); 6936 6937 if (old_crtc_state->has_pch_encoder) 6938 ironlake_fdi_disable(crtc); 6939 6940 intel_encoders_post_disable(state, crtc); 6941 6942 if (old_crtc_state->has_pch_encoder) { 6943 ironlake_disable_pch_transcoder(dev_priv, pipe); 6944 6945 if (HAS_PCH_CPT(dev_priv)) { 6946 i915_reg_t reg; 6947 u32 temp; 6948 6949 /* disable TRANS_DP_CTL */ 6950 reg = TRANS_DP_CTL(pipe); 6951 temp = I915_READ(reg); 6952 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 6953 TRANS_DP_PORT_SEL_MASK); 6954 temp |= TRANS_DP_PORT_SEL_NONE; 6955 I915_WRITE(reg, temp); 6956 6957 /* disable DPLL_SEL */ 6958 temp = I915_READ(PCH_DPLL_SEL); 6959 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 6960 I915_WRITE(PCH_DPLL_SEL, temp); 6961 } 6962 6963 ironlake_fdi_pll_disable(crtc); 6964 } 6965 6966 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6967 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6968 } 6969 6970 static void haswell_crtc_disable(struct intel_atomic_state *state, 6971 struct intel_crtc *crtc) 6972 { 6973 /* 6974 * FIXME collapse everything to one hook. 6975 * Need care with mst->ddi interactions. 6976 */ 6977 intel_encoders_disable(state, crtc); 6978 intel_encoders_post_disable(state, crtc); 6979 } 6980 6981 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 6982 { 6983 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6984 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6985 6986 if (!crtc_state->gmch_pfit.control) 6987 return; 6988 6989 /* 6990 * The panel fitter should only be adjusted whilst the pipe is disabled, 6991 * according to register description and PRM. 6992 */ 6993 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 6994 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); 6995 6996 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios); 6997 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control); 6998 6999 /* Border color in case we don't scale up to the full screen. Black by 7000 * default, change to something else for debugging. 
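 * (Purely illustrative: a value like 0x00ff0000 should give a red
 * border, assuming the register takes a plain RGB value.)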
*/ 7001 I915_WRITE(BCLRPAT(crtc->pipe), 0); 7002 } 7003 7004 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 7005 { 7006 if (phy == PHY_NONE) 7007 return false; 7008 7009 if (IS_ELKHARTLAKE(dev_priv)) 7010 return phy <= PHY_C; 7011 7012 if (INTEL_GEN(dev_priv) >= 11) 7013 return phy <= PHY_B; 7014 7015 return false; 7016 } 7017 7018 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 7019 { 7020 if (INTEL_GEN(dev_priv) >= 12) 7021 return phy >= PHY_D && phy <= PHY_I; 7022 7023 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) 7024 return phy >= PHY_C && phy <= PHY_F; 7025 7026 return false; 7027 } 7028 7029 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 7030 { 7031 if (IS_ELKHARTLAKE(i915) && port == PORT_D) 7032 return PHY_A; 7033 7034 return (enum phy)port; 7035 } 7036 7037 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 7038 { 7039 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 7040 return PORT_TC_NONE; 7041 7042 if (INTEL_GEN(dev_priv) >= 12) 7043 return port - PORT_D; 7044 7045 return port - PORT_C; 7046 } 7047 7048 enum intel_display_power_domain intel_port_to_power_domain(enum port port) 7049 { 7050 switch (port) { 7051 case PORT_A: 7052 return POWER_DOMAIN_PORT_DDI_A_LANES; 7053 case PORT_B: 7054 return POWER_DOMAIN_PORT_DDI_B_LANES; 7055 case PORT_C: 7056 return POWER_DOMAIN_PORT_DDI_C_LANES; 7057 case PORT_D: 7058 return POWER_DOMAIN_PORT_DDI_D_LANES; 7059 case PORT_E: 7060 return POWER_DOMAIN_PORT_DDI_E_LANES; 7061 case PORT_F: 7062 return POWER_DOMAIN_PORT_DDI_F_LANES; 7063 case PORT_G: 7064 return POWER_DOMAIN_PORT_DDI_G_LANES; 7065 default: 7066 MISSING_CASE(port); 7067 return POWER_DOMAIN_PORT_OTHER; 7068 } 7069 } 7070 7071 enum intel_display_power_domain 7072 intel_aux_power_domain(struct intel_digital_port *dig_port) 7073 { 7074 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 7075 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 7076 7077 if (intel_phy_is_tc(dev_priv, phy) && 7078 dig_port->tc_mode == TC_PORT_TBT_ALT) { 7079 switch (dig_port->aux_ch) { 7080 case AUX_CH_C: 7081 return POWER_DOMAIN_AUX_C_TBT; 7082 case AUX_CH_D: 7083 return POWER_DOMAIN_AUX_D_TBT; 7084 case AUX_CH_E: 7085 return POWER_DOMAIN_AUX_E_TBT; 7086 case AUX_CH_F: 7087 return POWER_DOMAIN_AUX_F_TBT; 7088 case AUX_CH_G: 7089 return POWER_DOMAIN_AUX_G_TBT; 7090 default: 7091 MISSING_CASE(dig_port->aux_ch); 7092 return POWER_DOMAIN_AUX_C_TBT; 7093 } 7094 } 7095 7096 switch (dig_port->aux_ch) { 7097 case AUX_CH_A: 7098 return POWER_DOMAIN_AUX_A; 7099 case AUX_CH_B: 7100 return POWER_DOMAIN_AUX_B; 7101 case AUX_CH_C: 7102 return POWER_DOMAIN_AUX_C; 7103 case AUX_CH_D: 7104 return POWER_DOMAIN_AUX_D; 7105 case AUX_CH_E: 7106 return POWER_DOMAIN_AUX_E; 7107 case AUX_CH_F: 7108 return POWER_DOMAIN_AUX_F; 7109 case AUX_CH_G: 7110 return POWER_DOMAIN_AUX_G; 7111 default: 7112 MISSING_CASE(dig_port->aux_ch); 7113 return POWER_DOMAIN_AUX_A; 7114 } 7115 } 7116 7117 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) 7118 { 7119 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7120 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7121 struct drm_encoder *encoder; 7122 enum pipe pipe = crtc->pipe; 7123 u64 mask; 7124 enum transcoder transcoder = crtc_state->cpu_transcoder; 7125 7126 if (!crtc_state->hw.active) 7127 return 0; 7128 7129 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe)); 7130 mask |= 
BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder)); 7131 if (crtc_state->pch_pfit.enabled || 7132 crtc_state->pch_pfit.force_thru) 7133 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 7134 7135 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 7136 crtc_state->uapi.encoder_mask) { 7137 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 7138 7139 mask |= BIT_ULL(intel_encoder->power_domain); 7140 } 7141 7142 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 7143 mask |= BIT_ULL(POWER_DOMAIN_AUDIO); 7144 7145 if (crtc_state->shared_dpll) 7146 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE); 7147 7148 return mask; 7149 } 7150 7151 static u64 7152 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) 7153 { 7154 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7155 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7156 enum intel_display_power_domain domain; 7157 u64 domains, new_domains, old_domains; 7158 7159 old_domains = crtc->enabled_power_domains; 7160 crtc->enabled_power_domains = new_domains = 7161 get_crtc_power_domains(crtc_state); 7162 7163 domains = new_domains & ~old_domains; 7164 7165 for_each_power_domain(domain, domains) 7166 intel_display_power_get(dev_priv, domain); 7167 7168 return old_domains & ~new_domains; 7169 } 7170 7171 static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 7172 u64 domains) 7173 { 7174 enum intel_display_power_domain domain; 7175 7176 for_each_power_domain(domain, domains) 7177 intel_display_power_put_unchecked(dev_priv, domain); 7178 } 7179 7180 static void valleyview_crtc_enable(struct intel_atomic_state *state, 7181 struct intel_crtc *crtc) 7182 { 7183 const struct intel_crtc_state *new_crtc_state = 7184 intel_atomic_get_new_crtc_state(state, crtc); 7185 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7186 enum pipe pipe = crtc->pipe; 7187 7188 if (WARN_ON(crtc->active)) 7189 return; 7190 7191 if (intel_crtc_has_dp_encoder(new_crtc_state)) 7192 intel_dp_set_m_n(new_crtc_state, M1_N1); 7193 7194 intel_set_pipe_timings(new_crtc_state); 7195 intel_set_pipe_src_size(new_crtc_state); 7196 7197 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 7198 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 7199 I915_WRITE(CHV_CANVAS(pipe), 0); 7200 } 7201 7202 i9xx_set_pipeconf(new_crtc_state); 7203 7204 crtc->active = true; 7205 7206 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 7207 7208 intel_encoders_pre_pll_enable(state, crtc); 7209 7210 if (IS_CHERRYVIEW(dev_priv)) { 7211 chv_prepare_pll(crtc, new_crtc_state); 7212 chv_enable_pll(crtc, new_crtc_state); 7213 } else { 7214 vlv_prepare_pll(crtc, new_crtc_state); 7215 vlv_enable_pll(crtc, new_crtc_state); 7216 } 7217 7218 intel_encoders_pre_enable(state, crtc); 7219 7220 i9xx_pfit_enable(new_crtc_state); 7221 7222 intel_color_load_luts(new_crtc_state); 7223 intel_color_commit(new_crtc_state); 7224 /* update DSPCNTR to configure gamma for pipe bottom color */ 7225 intel_disable_primary_plane(new_crtc_state); 7226 7227 dev_priv->display.initial_watermarks(state, crtc); 7228 intel_enable_pipe(new_crtc_state); 7229 7230 intel_crtc_vblank_on(new_crtc_state); 7231 7232 intel_encoders_enable(state, crtc); 7233 } 7234 7235 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) 7236 { 7237 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7238 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7239 7240 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0); 7241 
I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1); 7242 } 7243 7244 static void i9xx_crtc_enable(struct intel_atomic_state *state, 7245 struct intel_crtc *crtc) 7246 { 7247 const struct intel_crtc_state *new_crtc_state = 7248 intel_atomic_get_new_crtc_state(state, crtc); 7249 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7250 enum pipe pipe = crtc->pipe; 7251 7252 if (WARN_ON(crtc->active)) 7253 return; 7254 7255 i9xx_set_pll_dividers(new_crtc_state); 7256 7257 if (intel_crtc_has_dp_encoder(new_crtc_state)) 7258 intel_dp_set_m_n(new_crtc_state, M1_N1); 7259 7260 intel_set_pipe_timings(new_crtc_state); 7261 intel_set_pipe_src_size(new_crtc_state); 7262 7263 i9xx_set_pipeconf(new_crtc_state); 7264 7265 crtc->active = true; 7266 7267 if (!IS_GEN(dev_priv, 2)) 7268 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 7269 7270 intel_encoders_pre_enable(state, crtc); 7271 7272 i9xx_enable_pll(crtc, new_crtc_state); 7273 7274 i9xx_pfit_enable(new_crtc_state); 7275 7276 intel_color_load_luts(new_crtc_state); 7277 intel_color_commit(new_crtc_state); 7278 /* update DSPCNTR to configure gamma for pipe bottom color */ 7279 intel_disable_primary_plane(new_crtc_state); 7280 7281 if (dev_priv->display.initial_watermarks) 7282 dev_priv->display.initial_watermarks(state, crtc); 7283 else 7284 intel_update_watermarks(crtc); 7285 intel_enable_pipe(new_crtc_state); 7286 7287 intel_crtc_vblank_on(new_crtc_state); 7288 7289 intel_encoders_enable(state, crtc); 7290 } 7291 7292 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 7293 { 7294 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 7295 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7296 7297 if (!old_crtc_state->gmch_pfit.control) 7298 return; 7299 7300 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 7301 7302 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n", 7303 I915_READ(PFIT_CONTROL)); 7304 I915_WRITE(PFIT_CONTROL, 0); 7305 } 7306 7307 static void i9xx_crtc_disable(struct intel_atomic_state *state, 7308 struct intel_crtc *crtc) 7309 { 7310 struct intel_crtc_state *old_crtc_state = 7311 intel_atomic_get_old_crtc_state(state, crtc); 7312 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7313 enum pipe pipe = crtc->pipe; 7314 7315 /* 7316 * On gen2 planes are double buffered but the pipe isn't, so we must 7317 * wait for planes to fully turn off before disabling the pipe. 
7318 */ 7319 if (IS_GEN(dev_priv, 2)) 7320 intel_wait_for_vblank(dev_priv, pipe); 7321 7322 intel_encoders_disable(state, crtc); 7323 7324 intel_crtc_vblank_off(old_crtc_state); 7325 7326 intel_disable_pipe(old_crtc_state); 7327 7328 i9xx_pfit_disable(old_crtc_state); 7329 7330 intel_encoders_post_disable(state, crtc); 7331 7332 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 7333 if (IS_CHERRYVIEW(dev_priv)) 7334 chv_disable_pll(dev_priv, pipe); 7335 else if (IS_VALLEYVIEW(dev_priv)) 7336 vlv_disable_pll(dev_priv, pipe); 7337 else 7338 i9xx_disable_pll(old_crtc_state); 7339 } 7340 7341 intel_encoders_post_pll_disable(state, crtc); 7342 7343 if (!IS_GEN(dev_priv, 2)) 7344 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 7345 7346 if (!dev_priv->display.initial_watermarks) 7347 intel_update_watermarks(crtc); 7348 7349 /* clock the pipe down to 640x480@60 to potentially save power */ 7350 if (IS_I830(dev_priv)) 7351 i830_enable_pipe(dev_priv, pipe); 7352 } 7353 7354 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, 7355 struct drm_modeset_acquire_ctx *ctx) 7356 { 7357 struct intel_encoder *encoder; 7358 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7359 struct intel_bw_state *bw_state = 7360 to_intel_bw_state(dev_priv->bw_obj.state); 7361 struct intel_crtc_state *crtc_state = 7362 to_intel_crtc_state(crtc->base.state); 7363 enum intel_display_power_domain domain; 7364 struct intel_plane *plane; 7365 struct drm_atomic_state *state; 7366 struct intel_crtc_state *temp_crtc_state; 7367 enum pipe pipe = crtc->pipe; 7368 u64 domains; 7369 int ret; 7370 7371 if (!crtc_state->hw.active) 7372 return; 7373 7374 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 7375 const struct intel_plane_state *plane_state = 7376 to_intel_plane_state(plane->base.state); 7377 7378 if (plane_state->uapi.visible) 7379 intel_plane_disable_noatomic(crtc, plane); 7380 } 7381 7382 state = drm_atomic_state_alloc(&dev_priv->drm); 7383 if (!state) { 7384 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", 7385 crtc->base.base.id, crtc->base.name); 7386 return; 7387 } 7388 7389 state->acquire_ctx = ctx; 7390 7391 /* Everything's already locked, -EDEADLK can't happen. 
*/ 7392 temp_crtc_state = intel_atomic_get_crtc_state(state, crtc); 7393 ret = drm_atomic_add_affected_connectors(state, &crtc->base); 7394 7395 WARN_ON(IS_ERR(temp_crtc_state) || ret); 7396 7397 dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc); 7398 7399 drm_atomic_state_put(state); 7400 7401 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", 7402 crtc->base.base.id, crtc->base.name); 7403 7404 crtc->active = false; 7405 crtc->base.enabled = false; 7406 7407 WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0); 7408 crtc_state->uapi.active = false; 7409 crtc_state->uapi.connector_mask = 0; 7410 crtc_state->uapi.encoder_mask = 0; 7411 intel_crtc_free_hw_state(crtc_state); 7412 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw)); 7413 7414 for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder) 7415 encoder->base.crtc = NULL; 7416 7417 intel_fbc_disable(crtc); 7418 intel_update_watermarks(crtc); 7419 intel_disable_shared_dpll(crtc_state); 7420 7421 domains = crtc->enabled_power_domains; 7422 for_each_power_domain(domain, domains) 7423 intel_display_power_put_unchecked(dev_priv, domain); 7424 crtc->enabled_power_domains = 0; 7425 7426 dev_priv->active_pipes &= ~BIT(pipe); 7427 dev_priv->min_cdclk[pipe] = 0; 7428 dev_priv->min_voltage_level[pipe] = 0; 7429 7430 bw_state->data_rate[pipe] = 0; 7431 bw_state->num_active_planes[pipe] = 0; 7432 } 7433 7434 /* 7435 * turn all crtc's off, but do not adjust state 7436 * This has to be paired with a call to intel_modeset_setup_hw_state. 7437 */ 7438 int intel_display_suspend(struct drm_device *dev) 7439 { 7440 struct drm_i915_private *dev_priv = to_i915(dev); 7441 struct drm_atomic_state *state; 7442 int ret; 7443 7444 state = drm_atomic_helper_suspend(dev); 7445 ret = PTR_ERR_OR_ZERO(state); 7446 if (ret) 7447 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 7448 else 7449 dev_priv->modeset_restore_state = state; 7450 return ret; 7451 } 7452 7453 void intel_encoder_destroy(struct drm_encoder *encoder) 7454 { 7455 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 7456 7457 drm_encoder_cleanup(encoder); 7458 kfree(intel_encoder); 7459 } 7460 7461 /* Cross check the actual hw state with our own modeset state tracking (and it's 7462 * internal consistency). 
*/ 7463 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, 7464 struct drm_connector_state *conn_state) 7465 { 7466 struct intel_connector *connector = to_intel_connector(conn_state->connector); 7467 7468 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 7469 connector->base.base.id, 7470 connector->base.name); 7471 7472 if (connector->get_hw_state(connector)) { 7473 struct intel_encoder *encoder = connector->encoder; 7474 7475 I915_STATE_WARN(!crtc_state, 7476 "connector enabled without attached crtc\n"); 7477 7478 if (!crtc_state) 7479 return; 7480 7481 I915_STATE_WARN(!crtc_state->hw.active, 7482 "connector is active, but attached crtc isn't\n"); 7483 7484 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 7485 return; 7486 7487 I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 7488 "atomic encoder doesn't match attached encoder\n"); 7489 7490 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 7491 "attached encoder crtc differs from connector crtc\n"); 7492 } else { 7493 I915_STATE_WARN(crtc_state && crtc_state->hw.active, 7494 "attached crtc is active, but connector isn't\n"); 7495 I915_STATE_WARN(!crtc_state && conn_state->best_encoder, 7496 "best encoder set without crtc!\n"); 7497 } 7498 } 7499 7500 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 7501 { 7502 if (crtc_state->hw.enable && crtc_state->has_pch_encoder) 7503 return crtc_state->fdi_lanes; 7504 7505 return 0; 7506 } 7507 7508 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, 7509 struct intel_crtc_state *pipe_config) 7510 { 7511 struct drm_i915_private *dev_priv = to_i915(dev); 7512 struct drm_atomic_state *state = pipe_config->uapi.state; 7513 struct intel_crtc *other_crtc; 7514 struct intel_crtc_state *other_crtc_state; 7515 7516 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", 7517 pipe_name(pipe), pipe_config->fdi_lanes); 7518 if (pipe_config->fdi_lanes > 4) { 7519 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", 7520 pipe_name(pipe), pipe_config->fdi_lanes); 7521 return -EINVAL; 7522 } 7523 7524 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 7525 if (pipe_config->fdi_lanes > 2) { 7526 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 7527 pipe_config->fdi_lanes); 7528 return -EINVAL; 7529 } else { 7530 return 0; 7531 } 7532 } 7533 7534 if (INTEL_NUM_PIPES(dev_priv) == 2) 7535 return 0; 7536 7537 /* Ivybridge 3 pipe is really complicated */ 7538 switch (pipe) { 7539 case PIPE_A: 7540 return 0; 7541 case PIPE_B: 7542 if (pipe_config->fdi_lanes <= 2) 7543 return 0; 7544 7545 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); 7546 other_crtc_state = 7547 intel_atomic_get_crtc_state(state, other_crtc); 7548 if (IS_ERR(other_crtc_state)) 7549 return PTR_ERR(other_crtc_state); 7550 7551 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 7552 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", 7553 pipe_name(pipe), pipe_config->fdi_lanes); 7554 return -EINVAL; 7555 } 7556 return 0; 7557 case PIPE_C: 7558 if (pipe_config->fdi_lanes > 2) { 7559 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 7560 pipe_name(pipe), pipe_config->fdi_lanes); 7561 return -EINVAL; 7562 } 7563 7564 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); 7565 other_crtc_state = 7566 intel_atomic_get_crtc_state(state, other_crtc); 7567 if (IS_ERR(other_crtc_state)) 7568 return PTR_ERR(other_crtc_state); 7569 7570 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 7571 
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 7572 return -EINVAL; 7573 } 7574 return 0; 7575 default: 7576 BUG(); 7577 } 7578 } 7579 7580 #define RETRY 1 7581 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, 7582 struct intel_crtc_state *pipe_config) 7583 { 7584 struct drm_device *dev = intel_crtc->base.dev; 7585 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 7586 int lane, link_bw, fdi_dotclock, ret; 7587 bool needs_recompute = false; 7588 7589 retry: 7590 /* FDI is a binary signal running at ~2.7GHz, encoding 7591 * each output octet as 10 bits. The actual frequency 7592 * is stored as a divider into a 100MHz clock, and the 7593 * mode pixel clock is stored in units of 1KHz. 7594 * Hence the bw of each lane in terms of the mode signal 7595 * is: 7596 */ 7597 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config); 7598 7599 fdi_dotclock = adjusted_mode->crtc_clock; 7600 7601 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 7602 pipe_config->pipe_bpp); 7603 7604 pipe_config->fdi_lanes = lane; 7605 7606 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 7607 link_bw, &pipe_config->fdi_m_n, false, false); 7608 7609 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 7610 if (ret == -EDEADLK) 7611 return ret; 7612 7613 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 7614 pipe_config->pipe_bpp -= 2*3; 7615 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 7616 pipe_config->pipe_bpp); 7617 needs_recompute = true; 7618 pipe_config->bw_constrained = true; 7619 7620 goto retry; 7621 } 7622 7623 if (needs_recompute) 7624 return RETRY; 7625 7626 return ret; 7627 } 7628 7629 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) 7630 { 7631 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7632 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7633 7634 /* IPS only exists on ULT machines and is tied to pipe A. */ 7635 if (!hsw_crtc_supports_ips(crtc)) 7636 return false; 7637 7638 if (!i915_modparams.enable_ips) 7639 return false; 7640 7641 if (crtc_state->pipe_bpp > 24) 7642 return false; 7643 7644 /* 7645 * We compare against max which means we must take 7646 * the increased cdclk requirement into account when 7647 * calculating the new cdclk. 7648 * 7649 * Should measure whether using a lower cdclk w/o IPS 7650 */ 7651 if (IS_BROADWELL(dev_priv) && 7652 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100) 7653 return false; 7654 7655 return true; 7656 } 7657 7658 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state) 7659 { 7660 struct drm_i915_private *dev_priv = 7661 to_i915(crtc_state->uapi.crtc->dev); 7662 struct intel_atomic_state *intel_state = 7663 to_intel_atomic_state(crtc_state->uapi.state); 7664 7665 if (!hsw_crtc_state_ips_capable(crtc_state)) 7666 return false; 7667 7668 /* 7669 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 7670 * enabled and disabled dynamically based on package C states, 7671 * user space can't make reliable use of the CRCs, so let's just 7672 * completely disable it. 7673 */ 7674 if (crtc_state->crc_enabled) 7675 return false; 7676 7677 /* IPS should be fine as long as at least one plane is enabled. 
*/ 7678 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) 7679 return false; 7680 7681 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 7682 if (IS_BROADWELL(dev_priv) && 7683 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100) 7684 return false; 7685 7686 return true; 7687 } 7688 7689 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 7690 { 7691 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7692 7693 /* GDG double wide on either pipe, otherwise pipe A only */ 7694 return INTEL_GEN(dev_priv) < 4 && 7695 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 7696 } 7697 7698 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) 7699 { 7700 u32 pixel_rate; 7701 7702 pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock; 7703 7704 /* 7705 * We only use IF-ID interlacing. If we ever use 7706 * PF-ID we'll need to adjust the pixel_rate here. 7707 */ 7708 7709 if (pipe_config->pch_pfit.enabled) { 7710 u64 pipe_w, pipe_h, pfit_w, pfit_h; 7711 u32 pfit_size = pipe_config->pch_pfit.size; 7712 7713 pipe_w = pipe_config->pipe_src_w; 7714 pipe_h = pipe_config->pipe_src_h; 7715 7716 pfit_w = (pfit_size >> 16) & 0xFFFF; 7717 pfit_h = pfit_size & 0xFFFF; 7718 if (pipe_w < pfit_w) 7719 pipe_w = pfit_w; 7720 if (pipe_h < pfit_h) 7721 pipe_h = pfit_h; 7722 7723 if (WARN_ON(!pfit_w || !pfit_h)) 7724 return pixel_rate; 7725 7726 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h), 7727 pfit_w * pfit_h); 7728 } 7729 7730 return pixel_rate; 7731 } 7732 7733 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 7734 { 7735 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 7736 7737 if (HAS_GMCH(dev_priv)) 7738 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 7739 crtc_state->pixel_rate = 7740 crtc_state->hw.adjusted_mode.crtc_clock; 7741 else 7742 crtc_state->pixel_rate = 7743 ilk_pipe_pixel_rate(crtc_state); 7744 } 7745 7746 static int intel_crtc_compute_config(struct intel_crtc *crtc, 7747 struct intel_crtc_state *pipe_config) 7748 { 7749 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7750 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 7751 int clock_limit = dev_priv->max_dotclk_freq; 7752 7753 if (INTEL_GEN(dev_priv) < 4) { 7754 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 7755 7756 /* 7757 * Enable double wide mode when the dot clock 7758 * is > 90% of the (display) core speed. 7759 */ 7760 if (intel_crtc_supports_double_wide(crtc) && 7761 adjusted_mode->crtc_clock > clock_limit) { 7762 clock_limit = dev_priv->max_dotclk_freq; 7763 pipe_config->double_wide = true; 7764 } 7765 } 7766 7767 if (adjusted_mode->crtc_clock > clock_limit) { 7768 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 7769 adjusted_mode->crtc_clock, clock_limit, 7770 yesno(pipe_config->double_wide)); 7771 return -EINVAL; 7772 } 7773 7774 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 7775 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) && 7776 pipe_config->hw.ctm) { 7777 /* 7778 * There is only one pipe CSC unit per pipe, and we need that 7779 * for output conversion from RGB->YCBCR. So if CTM is already 7780 * applied we can't support YCBCR420 output. 
7781 */ 7782 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n"); 7783 return -EINVAL; 7784 } 7785 7786 /* 7787 * Pipe horizontal size must be even in: 7788 * - DVO ganged mode 7789 * - LVDS dual channel mode 7790 * - Double wide pipe 7791 */ 7792 if (pipe_config->pipe_src_w & 1) { 7793 if (pipe_config->double_wide) { 7794 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n"); 7795 return -EINVAL; 7796 } 7797 7798 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 7799 intel_is_dual_link_lvds(dev_priv)) { 7800 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n"); 7801 return -EINVAL; 7802 } 7803 } 7804 7805 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 7806 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 7807 */ 7808 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && 7809 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 7810 return -EINVAL; 7811 7812 intel_crtc_compute_pixel_rate(pipe_config); 7813 7814 if (pipe_config->has_pch_encoder) 7815 return ironlake_fdi_compute_config(crtc, pipe_config); 7816 7817 return 0; 7818 } 7819 7820 static void 7821 intel_reduce_m_n_ratio(u32 *num, u32 *den) 7822 { 7823 while (*num > DATA_LINK_M_N_MASK || 7824 *den > DATA_LINK_M_N_MASK) { 7825 *num >>= 1; 7826 *den >>= 1; 7827 } 7828 } 7829 7830 static void compute_m_n(unsigned int m, unsigned int n, 7831 u32 *ret_m, u32 *ret_n, 7832 bool constant_n) 7833 { 7834 /* 7835 * Several DP dongles in particular seem to be fussy about 7836 * too large link M/N values. Give N value as 0x8000 that 7837 * should be acceptable by specific devices. 0x8000 is the 7838 * specified fixed N value for asynchronous clock mode, 7839 * which the devices expect also in synchronous clock mode. 7840 */ 7841 if (constant_n) 7842 *ret_n = 0x8000; 7843 else 7844 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 7845 7846 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 7847 intel_reduce_m_n_ratio(ret_m, ret_n); 7848 } 7849 7850 void 7851 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 7852 int pixel_clock, int link_clock, 7853 struct intel_link_m_n *m_n, 7854 bool constant_n, bool fec_enable) 7855 { 7856 u32 data_clock = bits_per_pixel * pixel_clock; 7857 7858 if (fec_enable) 7859 data_clock = intel_dp_mode_to_fec_clock(data_clock); 7860 7861 m_n->tu = 64; 7862 compute_m_n(data_clock, 7863 link_clock * nlanes * 8, 7864 &m_n->gmch_m, &m_n->gmch_n, 7865 constant_n); 7866 7867 compute_m_n(pixel_clock, link_clock, 7868 &m_n->link_m, &m_n->link_n, 7869 constant_n); 7870 } 7871 7872 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) 7873 { 7874 /* 7875 * There may be no VBT; and if the BIOS enabled SSC we can 7876 * just keep using it to avoid unnecessary flicker. Whereas if the 7877 * BIOS isn't using it, don't assume it will work even if the VBT 7878 * indicates as much. 
7879 */ 7880 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 7881 bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) & 7882 DREF_SSC1_ENABLE; 7883 7884 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 7885 DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n", 7886 enableddisabled(bios_lvds_use_ssc), 7887 enableddisabled(dev_priv->vbt.lvds_use_ssc)); 7888 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 7889 } 7890 } 7891 } 7892 7893 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 7894 { 7895 if (i915_modparams.panel_use_ssc >= 0) 7896 return i915_modparams.panel_use_ssc != 0; 7897 return dev_priv->vbt.lvds_use_ssc 7898 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 7899 } 7900 7901 static u32 pnv_dpll_compute_fp(struct dpll *dpll) 7902 { 7903 return (1 << dpll->n) << 16 | dpll->m2; 7904 } 7905 7906 static u32 i9xx_dpll_compute_fp(struct dpll *dpll) 7907 { 7908 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 7909 } 7910 7911 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7912 struct intel_crtc_state *crtc_state, 7913 struct dpll *reduced_clock) 7914 { 7915 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7916 u32 fp, fp2 = 0; 7917 7918 if (IS_PINEVIEW(dev_priv)) { 7919 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 7920 if (reduced_clock) 7921 fp2 = pnv_dpll_compute_fp(reduced_clock); 7922 } else { 7923 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 7924 if (reduced_clock) 7925 fp2 = i9xx_dpll_compute_fp(reduced_clock); 7926 } 7927 7928 crtc_state->dpll_hw_state.fp0 = fp; 7929 7930 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 7931 reduced_clock) { 7932 crtc_state->dpll_hw_state.fp1 = fp2; 7933 } else { 7934 crtc_state->dpll_hw_state.fp1 = fp; 7935 } 7936 } 7937 7938 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe 7939 pipe) 7940 { 7941 u32 reg_val; 7942 7943 /* 7944 * PLLB opamp always calibrates to max value of 0x3f, force enable it 7945 * and set it to a reasonable value instead. 
7946 */ 7947 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7948 reg_val &= 0xffffff00; 7949 reg_val |= 0x00000030; 7950 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7951 7952 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7953 reg_val &= 0x00ffffff; 7954 reg_val |= 0x8c000000; 7955 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7956 7957 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7958 reg_val &= 0xffffff00; 7959 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7960 7961 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7962 reg_val &= 0x00ffffff; 7963 reg_val |= 0xb0000000; 7964 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7965 } 7966 7967 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 7968 const struct intel_link_m_n *m_n) 7969 { 7970 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7971 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7972 enum pipe pipe = crtc->pipe; 7973 7974 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7975 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 7976 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 7977 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 7978 } 7979 7980 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 7981 enum transcoder transcoder) 7982 { 7983 if (IS_HASWELL(dev_priv)) 7984 return transcoder == TRANSCODER_EDP; 7985 7986 /* 7987 * Strictly speaking some registers are available before 7988 * gen7, but we only support DRRS on gen7+ 7989 */ 7990 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv); 7991 } 7992 7993 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 7994 const struct intel_link_m_n *m_n, 7995 const struct intel_link_m_n *m2_n2) 7996 { 7997 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7998 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7999 enum pipe pipe = crtc->pipe; 8000 enum transcoder transcoder = crtc_state->cpu_transcoder; 8001 8002 if (INTEL_GEN(dev_priv) >= 5) { 8003 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 8004 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 8005 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 8006 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 8007 /* 8008 * M2_N2 registers are set only if DRRS is supported 8009 * (to make sure the registers are not unnecessarily accessed). 8010 */ 8011 if (m2_n2 && crtc_state->has_drrs && 8012 transcoder_has_m2_n2(dev_priv, transcoder)) { 8013 I915_WRITE(PIPE_DATA_M2(transcoder), 8014 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 8015 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 8016 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 8017 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 8018 } 8019 } else { 8020 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 8021 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 8022 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 8023 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 8024 } 8025 } 8026 8027 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) 8028 { 8029 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 8030 8031 if (m_n == M1_N1) { 8032 dp_m_n = &crtc_state->dp_m_n; 8033 dp_m2_n2 = &crtc_state->dp_m2_n2; 8034 } else if (m_n == M2_N2) { 8035 8036 /* 8037 * M2_N2 registers are not supported. Hence m2_n2 divider value 8038 * needs to be programmed into M1_N1. 
8039 */ 8040 dp_m_n = &crtc_state->dp_m2_n2; 8041 } else { 8042 DRM_ERROR("Unsupported divider value\n"); 8043 return; 8044 } 8045 8046 if (crtc_state->has_pch_encoder) 8047 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n); 8048 else 8049 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); 8050 } 8051 8052 static void vlv_compute_dpll(struct intel_crtc *crtc, 8053 struct intel_crtc_state *pipe_config) 8054 { 8055 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 8056 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 8057 if (crtc->pipe != PIPE_A) 8058 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 8059 8060 /* DPLL not used with DSI, but still need the rest set up */ 8061 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 8062 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 8063 DPLL_EXT_BUFFER_ENABLE_VLV; 8064 8065 pipe_config->dpll_hw_state.dpll_md = 8066 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8067 } 8068 8069 static void chv_compute_dpll(struct intel_crtc *crtc, 8070 struct intel_crtc_state *pipe_config) 8071 { 8072 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 8073 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 8074 if (crtc->pipe != PIPE_A) 8075 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 8076 8077 /* DPLL not used with DSI, but still need the rest set up */ 8078 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 8079 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 8080 8081 pipe_config->dpll_hw_state.dpll_md = 8082 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8083 } 8084 8085 static void vlv_prepare_pll(struct intel_crtc *crtc, 8086 const struct intel_crtc_state *pipe_config) 8087 { 8088 struct drm_device *dev = crtc->base.dev; 8089 struct drm_i915_private *dev_priv = to_i915(dev); 8090 enum pipe pipe = crtc->pipe; 8091 u32 mdiv; 8092 u32 bestn, bestm1, bestm2, bestp1, bestp2; 8093 u32 coreclk, reg_val; 8094 8095 /* Enable Refclk */ 8096 I915_WRITE(DPLL(pipe), 8097 pipe_config->dpll_hw_state.dpll & 8098 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 8099 8100 /* No need to actually set up the DPLL with DSI */ 8101 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8102 return; 8103 8104 vlv_dpio_get(dev_priv); 8105 8106 bestn = pipe_config->dpll.n; 8107 bestm1 = pipe_config->dpll.m1; 8108 bestm2 = pipe_config->dpll.m2; 8109 bestp1 = pipe_config->dpll.p1; 8110 bestp2 = pipe_config->dpll.p2; 8111 8112 /* See eDP HDMI DPIO driver vbios notes doc */ 8113 8114 /* PLL B needs special handling */ 8115 if (pipe == PIPE_B) 8116 vlv_pllb_recal_opamp(dev_priv, pipe); 8117 8118 /* Set up Tx target for periodic Rcomp update */ 8119 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 8120 8121 /* Disable target IRef on PLL */ 8122 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 8123 reg_val &= 0x00ffffff; 8124 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 8125 8126 /* Disable fast lock */ 8127 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 8128 8129 /* Set idtafcrecal before PLL is enabled */ 8130 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 8131 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 8132 mdiv |= ((bestn << DPIO_N_SHIFT)); 8133 mdiv |= (1 << DPIO_K_SHIFT); 8134 8135 /* 8136 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 8137 * but we don't support that). 8138 * Note: don't use the DAC post divider as it seems unstable. 
8139 */ 8140 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 8141 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 8142 8143 mdiv |= DPIO_ENABLE_CALIBRATION; 8144 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 8145 8146 /* Set HBR and RBR LPF coefficients */ 8147 if (pipe_config->port_clock == 162000 || 8148 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || 8149 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) 8150 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 8151 0x009f0003); 8152 else 8153 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 8154 0x00d0000f); 8155 8156 if (intel_crtc_has_dp_encoder(pipe_config)) { 8157 /* Use SSC source */ 8158 if (pipe == PIPE_A) 8159 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8160 0x0df40000); 8161 else 8162 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8163 0x0df70000); 8164 } else { /* HDMI or VGA */ 8165 /* Use bend source */ 8166 if (pipe == PIPE_A) 8167 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8168 0x0df70000); 8169 else 8170 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8171 0x0df40000); 8172 } 8173 8174 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 8175 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 8176 if (intel_crtc_has_dp_encoder(pipe_config)) 8177 coreclk |= 0x01000000; 8178 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 8179 8180 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 8181 8182 vlv_dpio_put(dev_priv); 8183 } 8184 8185 static void chv_prepare_pll(struct intel_crtc *crtc, 8186 const struct intel_crtc_state *pipe_config) 8187 { 8188 struct drm_device *dev = crtc->base.dev; 8189 struct drm_i915_private *dev_priv = to_i915(dev); 8190 enum pipe pipe = crtc->pipe; 8191 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8192 u32 loopfilter, tribuf_calcntr; 8193 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 8194 u32 dpio_val; 8195 int vco; 8196 8197 /* Enable Refclk and SSC */ 8198 I915_WRITE(DPLL(pipe), 8199 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 8200 8201 /* No need to actually set up the DPLL with DSI */ 8202 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8203 return; 8204 8205 bestn = pipe_config->dpll.n; 8206 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 8207 bestm1 = pipe_config->dpll.m1; 8208 bestm2 = pipe_config->dpll.m2 >> 22; 8209 bestp1 = pipe_config->dpll.p1; 8210 bestp2 = pipe_config->dpll.p2; 8211 vco = pipe_config->dpll.vco; 8212 dpio_val = 0; 8213 loopfilter = 0; 8214 8215 vlv_dpio_get(dev_priv); 8216 8217 /* p1 and p2 divider */ 8218 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 8219 5 << DPIO_CHV_S1_DIV_SHIFT | 8220 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 8221 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 8222 1 << DPIO_CHV_K_DIV_SHIFT); 8223 8224 /* Feedback post-divider - m2 */ 8225 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 8226 8227 /* Feedback refclk divider - n and m1 */ 8228 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 8229 DPIO_CHV_M1_DIV_BY_2 | 8230 1 << DPIO_CHV_N_DIV_SHIFT); 8231 8232 /* M2 fraction division */ 8233 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 8234 8235 /* M2 fraction division enable */ 8236 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8237 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 8238 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 8239 if (bestm2_frac) 8240 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 8241 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 8242 8243 /* Program 
digital lock detect threshold */ 8244 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 8245 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 8246 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 8247 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 8248 if (!bestm2_frac) 8249 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 8250 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 8251 8252 /* Loop filter */ 8253 if (vco == 5400000) { 8254 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 8255 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 8256 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 8257 tribuf_calcntr = 0x9; 8258 } else if (vco <= 6200000) { 8259 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 8260 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 8261 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8262 tribuf_calcntr = 0x9; 8263 } else if (vco <= 6480000) { 8264 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 8265 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 8266 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8267 tribuf_calcntr = 0x8; 8268 } else { 8269 /* Not supported. Apply the same limits as in the max case */ 8270 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 8271 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 8272 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8273 tribuf_calcntr = 0; 8274 } 8275 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 8276 8277 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 8278 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 8279 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 8280 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 8281 8282 /* AFC Recal */ 8283 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 8284 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 8285 DPIO_AFC_RECAL); 8286 8287 vlv_dpio_put(dev_priv); 8288 } 8289 8290 /** 8291 * vlv_force_pll_on - forcibly enable just the PLL 8292 * @dev_priv: i915 private structure 8293 * @pipe: pipe PLL to enable 8294 * @dpll: PLL configuration 8295 * 8296 * Enable the PLL for @pipe using the supplied @dpll config. To be used 8297 * in cases where we need the PLL enabled even when @pipe is not going to 8298 * be enabled. 8299 */ 8300 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, 8301 const struct dpll *dpll) 8302 { 8303 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 8304 struct intel_crtc_state *pipe_config; 8305 8306 pipe_config = intel_crtc_state_alloc(crtc); 8307 if (!pipe_config) 8308 return -ENOMEM; 8309 8310 pipe_config->cpu_transcoder = (enum transcoder)pipe; 8311 pipe_config->pixel_multiplier = 1; 8312 pipe_config->dpll = *dpll; 8313 8314 if (IS_CHERRYVIEW(dev_priv)) { 8315 chv_compute_dpll(crtc, pipe_config); 8316 chv_prepare_pll(crtc, pipe_config); 8317 chv_enable_pll(crtc, pipe_config); 8318 } else { 8319 vlv_compute_dpll(crtc, pipe_config); 8320 vlv_prepare_pll(crtc, pipe_config); 8321 vlv_enable_pll(crtc, pipe_config); 8322 } 8323 8324 kfree(pipe_config); 8325 8326 return 0; 8327 } 8328 8329 /** 8330 * vlv_force_pll_off - forcibly disable just the PLL 8331 * @dev_priv: i915 private structure 8332 * @pipe: pipe PLL to disable 8333 * 8334 * Disable the PLL for @pipe. To be used in cases where we need 8335 * the PLL enabled even when @pipe is not going to be enabled. 
8336 */ 8337 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) 8338 { 8339 if (IS_CHERRYVIEW(dev_priv)) 8340 chv_disable_pll(dev_priv, pipe); 8341 else 8342 vlv_disable_pll(dev_priv, pipe); 8343 } 8344 8345 static void i9xx_compute_dpll(struct intel_crtc *crtc, 8346 struct intel_crtc_state *crtc_state, 8347 struct dpll *reduced_clock) 8348 { 8349 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8350 u32 dpll; 8351 struct dpll *clock = &crtc_state->dpll; 8352 8353 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8354 8355 dpll = DPLL_VGA_MODE_DIS; 8356 8357 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 8358 dpll |= DPLLB_MODE_LVDS; 8359 else 8360 dpll |= DPLLB_MODE_DAC_SERIAL; 8361 8362 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 8363 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 8364 dpll |= (crtc_state->pixel_multiplier - 1) 8365 << SDVO_MULTIPLIER_SHIFT_HIRES; 8366 } 8367 8368 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 8369 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 8370 dpll |= DPLL_SDVO_HIGH_SPEED; 8371 8372 if (intel_crtc_has_dp_encoder(crtc_state)) 8373 dpll |= DPLL_SDVO_HIGH_SPEED; 8374 8375 /* compute bitmask from p1 value */ 8376 if (IS_PINEVIEW(dev_priv)) 8377 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 8378 else { 8379 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8380 if (IS_G4X(dev_priv) && reduced_clock) 8381 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8382 } 8383 switch (clock->p2) { 8384 case 5: 8385 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8386 break; 8387 case 7: 8388 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8389 break; 8390 case 10: 8391 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8392 break; 8393 case 14: 8394 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8395 break; 8396 } 8397 if (INTEL_GEN(dev_priv) >= 4) 8398 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 8399 8400 if (crtc_state->sdvo_tv_clock) 8401 dpll |= PLL_REF_INPUT_TVCLKINBC; 8402 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8403 intel_panel_use_ssc(dev_priv)) 8404 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8405 else 8406 dpll |= PLL_REF_INPUT_DREFCLK; 8407 8408 dpll |= DPLL_VCO_ENABLE; 8409 crtc_state->dpll_hw_state.dpll = dpll; 8410 8411 if (INTEL_GEN(dev_priv) >= 4) { 8412 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 8413 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8414 crtc_state->dpll_hw_state.dpll_md = dpll_md; 8415 } 8416 } 8417 8418 static void i8xx_compute_dpll(struct intel_crtc *crtc, 8419 struct intel_crtc_state *crtc_state, 8420 struct dpll *reduced_clock) 8421 { 8422 struct drm_device *dev = crtc->base.dev; 8423 struct drm_i915_private *dev_priv = to_i915(dev); 8424 u32 dpll; 8425 struct dpll *clock = &crtc_state->dpll; 8426 8427 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8428 8429 dpll = DPLL_VGA_MODE_DIS; 8430 8431 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8432 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8433 } else { 8434 if (clock->p1 == 2) 8435 dpll |= PLL_P1_DIVIDE_BY_TWO; 8436 else 8437 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8438 if (clock->p2 == 4) 8439 dpll |= PLL_P2_DIVIDE_BY_4; 8440 } 8441 8442 /* 8443 * Bspec: 8444 * "[Almador Errata}: For the correct operation of the muxed DVO pins 8445 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data, 8446 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock 8447 * Enable) must be set to “1” in both the DPLL A Control 
Register 8448 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)." 8449 * 8450 * For simplicity We simply keep both bits always enabled in 8451 * both DPLLS. The spec says we should disable the DVO 2X clock 8452 * when not needed, but this seems to work fine in practice. 8453 */ 8454 if (IS_I830(dev_priv) || 8455 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) 8456 dpll |= DPLL_DVO_2X_MODE; 8457 8458 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8459 intel_panel_use_ssc(dev_priv)) 8460 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8461 else 8462 dpll |= PLL_REF_INPUT_DREFCLK; 8463 8464 dpll |= DPLL_VCO_ENABLE; 8465 crtc_state->dpll_hw_state.dpll = dpll; 8466 } 8467 8468 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) 8469 { 8470 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8471 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8472 enum pipe pipe = crtc->pipe; 8473 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8474 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 8475 u32 crtc_vtotal, crtc_vblank_end; 8476 int vsyncshift = 0; 8477 8478 /* We need to be careful not to changed the adjusted mode, for otherwise 8479 * the hw state checker will get angry at the mismatch. */ 8480 crtc_vtotal = adjusted_mode->crtc_vtotal; 8481 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 8482 8483 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 8484 /* the chip adds 2 halflines automatically */ 8485 crtc_vtotal -= 1; 8486 crtc_vblank_end -= 1; 8487 8488 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8489 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 8490 else 8491 vsyncshift = adjusted_mode->crtc_hsync_start - 8492 adjusted_mode->crtc_htotal / 2; 8493 if (vsyncshift < 0) 8494 vsyncshift += adjusted_mode->crtc_htotal; 8495 } 8496 8497 if (INTEL_GEN(dev_priv) > 3) 8498 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 8499 8500 I915_WRITE(HTOTAL(cpu_transcoder), 8501 (adjusted_mode->crtc_hdisplay - 1) | 8502 ((adjusted_mode->crtc_htotal - 1) << 16)); 8503 I915_WRITE(HBLANK(cpu_transcoder), 8504 (adjusted_mode->crtc_hblank_start - 1) | 8505 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 8506 I915_WRITE(HSYNC(cpu_transcoder), 8507 (adjusted_mode->crtc_hsync_start - 1) | 8508 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 8509 8510 I915_WRITE(VTOTAL(cpu_transcoder), 8511 (adjusted_mode->crtc_vdisplay - 1) | 8512 ((crtc_vtotal - 1) << 16)); 8513 I915_WRITE(VBLANK(cpu_transcoder), 8514 (adjusted_mode->crtc_vblank_start - 1) | 8515 ((crtc_vblank_end - 1) << 16)); 8516 I915_WRITE(VSYNC(cpu_transcoder), 8517 (adjusted_mode->crtc_vsync_start - 1) | 8518 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 8519 8520 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 8521 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 8522 * documented on the DDI_FUNC_CTL register description, EDP Input Select 8523 * bits. 
*/ 8524 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 8525 (pipe == PIPE_B || pipe == PIPE_C)) 8526 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 8527 8528 } 8529 8530 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 8531 { 8532 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8533 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8534 enum pipe pipe = crtc->pipe; 8535 8536 /* pipesrc controls the size that is scaled from, which should 8537 * always be the user's requested size. 8538 */ 8539 I915_WRITE(PIPESRC(pipe), 8540 ((crtc_state->pipe_src_w - 1) << 16) | 8541 (crtc_state->pipe_src_h - 1)); 8542 } 8543 8544 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 8545 { 8546 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 8547 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8548 8549 if (IS_GEN(dev_priv, 2)) 8550 return false; 8551 8552 if (INTEL_GEN(dev_priv) >= 9 || 8553 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 8554 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; 8555 else 8556 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; 8557 } 8558 8559 static void intel_get_pipe_timings(struct intel_crtc *crtc, 8560 struct intel_crtc_state *pipe_config) 8561 { 8562 struct drm_device *dev = crtc->base.dev; 8563 struct drm_i915_private *dev_priv = to_i915(dev); 8564 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 8565 u32 tmp; 8566 8567 tmp = I915_READ(HTOTAL(cpu_transcoder)); 8568 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 8569 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 8570 8571 if (!transcoder_is_dsi(cpu_transcoder)) { 8572 tmp = I915_READ(HBLANK(cpu_transcoder)); 8573 pipe_config->hw.adjusted_mode.crtc_hblank_start = 8574 (tmp & 0xffff) + 1; 8575 pipe_config->hw.adjusted_mode.crtc_hblank_end = 8576 ((tmp >> 16) & 0xffff) + 1; 8577 } 8578 tmp = I915_READ(HSYNC(cpu_transcoder)); 8579 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 8580 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 8581 8582 tmp = I915_READ(VTOTAL(cpu_transcoder)); 8583 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 8584 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 8585 8586 if (!transcoder_is_dsi(cpu_transcoder)) { 8587 tmp = I915_READ(VBLANK(cpu_transcoder)); 8588 pipe_config->hw.adjusted_mode.crtc_vblank_start = 8589 (tmp & 0xffff) + 1; 8590 pipe_config->hw.adjusted_mode.crtc_vblank_end = 8591 ((tmp >> 16) & 0xffff) + 1; 8592 } 8593 tmp = I915_READ(VSYNC(cpu_transcoder)); 8594 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 8595 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 8596 8597 if (intel_pipe_is_interlaced(pipe_config)) { 8598 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 8599 pipe_config->hw.adjusted_mode.crtc_vtotal += 1; 8600 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1; 8601 } 8602 } 8603 8604 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 8605 struct intel_crtc_state *pipe_config) 8606 { 8607 struct drm_device *dev = crtc->base.dev; 8608 struct drm_i915_private *dev_priv = to_i915(dev); 8609 u32 tmp; 8610 8611 tmp = I915_READ(PIPESRC(crtc->pipe)); 8612 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 8613 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 8614 8615 
pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h; 8616 pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w; 8617 } 8618 8619 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 8620 struct intel_crtc_state *pipe_config) 8621 { 8622 mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay; 8623 mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal; 8624 mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start; 8625 mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end; 8626 8627 mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay; 8628 mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal; 8629 mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start; 8630 mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end; 8631 8632 mode->flags = pipe_config->hw.adjusted_mode.flags; 8633 mode->type = DRM_MODE_TYPE_DRIVER; 8634 8635 mode->clock = pipe_config->hw.adjusted_mode.crtc_clock; 8636 8637 mode->hsync = drm_mode_hsync(mode); 8638 mode->vrefresh = drm_mode_vrefresh(mode); 8639 drm_mode_set_name(mode); 8640 } 8641 8642 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 8643 { 8644 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8645 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8646 u32 pipeconf; 8647 8648 pipeconf = 0; 8649 8650 /* we keep both pipes enabled on 830 */ 8651 if (IS_I830(dev_priv)) 8652 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; 8653 8654 if (crtc_state->double_wide) 8655 pipeconf |= PIPECONF_DOUBLE_WIDE; 8656 8657 /* only g4x and later have fancy bpc/dither controls */ 8658 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8659 IS_CHERRYVIEW(dev_priv)) { 8660 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 8661 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 8662 pipeconf |= PIPECONF_DITHER_EN | 8663 PIPECONF_DITHER_TYPE_SP; 8664 8665 switch (crtc_state->pipe_bpp) { 8666 case 18: 8667 pipeconf |= PIPECONF_6BPC; 8668 break; 8669 case 24: 8670 pipeconf |= PIPECONF_8BPC; 8671 break; 8672 case 30: 8673 pipeconf |= PIPECONF_10BPC; 8674 break; 8675 default: 8676 /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ 8677 BUG(); 8678 } 8679 } 8680 8681 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 8682 if (INTEL_GEN(dev_priv) < 4 || 8683 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8684 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 8685 else 8686 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 8687 } else { 8688 pipeconf |= PIPECONF_PROGRESSIVE; 8689 } 8690 8691 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 8692 crtc_state->limited_color_range) 8693 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 8694 8695 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 8696 8697 pipeconf |= PIPECONF_FRAME_START_DELAY(0); 8698 8699 I915_WRITE(PIPECONF(crtc->pipe), pipeconf); 8700 POSTING_READ(PIPECONF(crtc->pipe)); 8701 } 8702 8703 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 8704 struct intel_crtc_state *crtc_state) 8705 { 8706 struct drm_device *dev = crtc->base.dev; 8707 struct drm_i915_private *dev_priv = to_i915(dev); 8708 const struct intel_limit *limit; 8709 int refclk = 48000; 8710 8711 memset(&crtc_state->dpll_hw_state, 0, 8712 sizeof(crtc_state->dpll_hw_state)); 8713 8714 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8715 if (intel_panel_use_ssc(dev_priv)) { 8716 refclk = dev_priv->vbt.lvds_ssc_freq; 8717 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8718 } 8719 8720 limit = &intel_limits_i8xx_lvds; 8721 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 8722 limit = &intel_limits_i8xx_dvo; 8723 } else { 8724 limit = &intel_limits_i8xx_dac; 8725 } 8726 8727 if (!crtc_state->clock_set && 8728 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8729 refclk, NULL, &crtc_state->dpll)) { 8730 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8731 return -EINVAL; 8732 } 8733 8734 i8xx_compute_dpll(crtc, crtc_state, NULL); 8735 8736 return 0; 8737 } 8738 8739 static int g4x_crtc_compute_clock(struct intel_crtc *crtc, 8740 struct intel_crtc_state *crtc_state) 8741 { 8742 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8743 const struct intel_limit *limit; 8744 int refclk = 96000; 8745 8746 memset(&crtc_state->dpll_hw_state, 0, 8747 sizeof(crtc_state->dpll_hw_state)); 8748 8749 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8750 if (intel_panel_use_ssc(dev_priv)) { 8751 refclk = dev_priv->vbt.lvds_ssc_freq; 8752 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8753 } 8754 8755 if (intel_is_dual_link_lvds(dev_priv)) 8756 limit = &intel_limits_g4x_dual_channel_lvds; 8757 else 8758 limit = &intel_limits_g4x_single_channel_lvds; 8759 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 8760 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 8761 limit = &intel_limits_g4x_hdmi; 8762 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 8763 limit = &intel_limits_g4x_sdvo; 8764 } else { 8765 /* The option is for other outputs */ 8766 limit = &intel_limits_i9xx_sdvo; 8767 } 8768 8769 if (!crtc_state->clock_set && 8770 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8771 refclk, NULL, &crtc_state->dpll)) { 8772 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8773 return -EINVAL; 8774 } 8775 8776 i9xx_compute_dpll(crtc, crtc_state, NULL); 8777 8778 return 0; 8779 } 8780 8781 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 8782 struct intel_crtc_state *crtc_state) 8783 { 8784 struct drm_device *dev = crtc->base.dev; 8785 struct drm_i915_private *dev_priv = to_i915(dev); 8786 const struct intel_limit *limit; 
8787 int refclk = 96000; 8788 8789 memset(&crtc_state->dpll_hw_state, 0, 8790 sizeof(crtc_state->dpll_hw_state)); 8791 8792 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8793 if (intel_panel_use_ssc(dev_priv)) { 8794 refclk = dev_priv->vbt.lvds_ssc_freq; 8795 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8796 } 8797 8798 limit = &intel_limits_pineview_lvds; 8799 } else { 8800 limit = &intel_limits_pineview_sdvo; 8801 } 8802 8803 if (!crtc_state->clock_set && 8804 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8805 refclk, NULL, &crtc_state->dpll)) { 8806 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8807 return -EINVAL; 8808 } 8809 8810 i9xx_compute_dpll(crtc, crtc_state, NULL); 8811 8812 return 0; 8813 } 8814 8815 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 8816 struct intel_crtc_state *crtc_state) 8817 { 8818 struct drm_device *dev = crtc->base.dev; 8819 struct drm_i915_private *dev_priv = to_i915(dev); 8820 const struct intel_limit *limit; 8821 int refclk = 96000; 8822 8823 memset(&crtc_state->dpll_hw_state, 0, 8824 sizeof(crtc_state->dpll_hw_state)); 8825 8826 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8827 if (intel_panel_use_ssc(dev_priv)) { 8828 refclk = dev_priv->vbt.lvds_ssc_freq; 8829 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8830 } 8831 8832 limit = &intel_limits_i9xx_lvds; 8833 } else { 8834 limit = &intel_limits_i9xx_sdvo; 8835 } 8836 8837 if (!crtc_state->clock_set && 8838 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8839 refclk, NULL, &crtc_state->dpll)) { 8840 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8841 return -EINVAL; 8842 } 8843 8844 i9xx_compute_dpll(crtc, crtc_state, NULL); 8845 8846 return 0; 8847 } 8848 8849 static int chv_crtc_compute_clock(struct intel_crtc *crtc, 8850 struct intel_crtc_state *crtc_state) 8851 { 8852 int refclk = 100000; 8853 const struct intel_limit *limit = &intel_limits_chv; 8854 8855 memset(&crtc_state->dpll_hw_state, 0, 8856 sizeof(crtc_state->dpll_hw_state)); 8857 8858 if (!crtc_state->clock_set && 8859 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8860 refclk, NULL, &crtc_state->dpll)) { 8861 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8862 return -EINVAL; 8863 } 8864 8865 chv_compute_dpll(crtc, crtc_state); 8866 8867 return 0; 8868 } 8869 8870 static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 8871 struct intel_crtc_state *crtc_state) 8872 { 8873 int refclk = 100000; 8874 const struct intel_limit *limit = &intel_limits_vlv; 8875 8876 memset(&crtc_state->dpll_hw_state, 0, 8877 sizeof(crtc_state->dpll_hw_state)); 8878 8879 if (!crtc_state->clock_set && 8880 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8881 refclk, NULL, &crtc_state->dpll)) { 8882 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8883 return -EINVAL; 8884 } 8885 8886 vlv_compute_dpll(crtc, crtc_state); 8887 8888 return 0; 8889 } 8890 8891 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 8892 { 8893 if (IS_I830(dev_priv)) 8894 return false; 8895 8896 return INTEL_GEN(dev_priv) >= 4 || 8897 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 8898 } 8899 8900 static void i9xx_get_pfit_config(struct intel_crtc *crtc, 8901 struct intel_crtc_state *pipe_config) 8902 { 8903 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8904 u32 tmp; 8905 8906 if (!i9xx_has_pfit(dev_priv)) 8907 return; 8908 8909 tmp = I915_READ(PFIT_CONTROL); 8910 if (!(tmp & PFIT_ENABLE)) 8911 return; 8912 
8913 /* Check whether the pfit is attached to our pipe. */ 8914 if (INTEL_GEN(dev_priv) < 4) { 8915 if (crtc->pipe != PIPE_B) 8916 return; 8917 } else { 8918 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 8919 return; 8920 } 8921 8922 pipe_config->gmch_pfit.control = tmp; 8923 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 8924 } 8925 8926 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 8927 struct intel_crtc_state *pipe_config) 8928 { 8929 struct drm_device *dev = crtc->base.dev; 8930 struct drm_i915_private *dev_priv = to_i915(dev); 8931 enum pipe pipe = crtc->pipe; 8932 struct dpll clock; 8933 u32 mdiv; 8934 int refclk = 100000; 8935 8936 /* In case of DSI, DPLL will not be used */ 8937 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8938 return; 8939 8940 vlv_dpio_get(dev_priv); 8941 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 8942 vlv_dpio_put(dev_priv); 8943 8944 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 8945 clock.m2 = mdiv & DPIO_M2DIV_MASK; 8946 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 8947 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 8948 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 8949 8950 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 8951 } 8952 8953 static void 8954 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 8955 struct intel_initial_plane_config *plane_config) 8956 { 8957 struct drm_device *dev = crtc->base.dev; 8958 struct drm_i915_private *dev_priv = to_i915(dev); 8959 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 8960 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 8961 enum pipe pipe; 8962 u32 val, base, offset; 8963 int fourcc, pixel_format; 8964 unsigned int aligned_height; 8965 struct drm_framebuffer *fb; 8966 struct intel_framebuffer *intel_fb; 8967 8968 if (!plane->get_hw_state(plane, &pipe)) 8969 return; 8970 8971 WARN_ON(pipe != crtc->pipe); 8972 8973 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8974 if (!intel_fb) { 8975 DRM_DEBUG_KMS("failed to alloc fb\n"); 8976 return; 8977 } 8978 8979 fb = &intel_fb->base; 8980 8981 fb->dev = dev; 8982 8983 val = I915_READ(DSPCNTR(i9xx_plane)); 8984 8985 if (INTEL_GEN(dev_priv) >= 4) { 8986 if (val & DISPPLANE_TILED) { 8987 plane_config->tiling = I915_TILING_X; 8988 fb->modifier = I915_FORMAT_MOD_X_TILED; 8989 } 8990 8991 if (val & DISPPLANE_ROTATE_180) 8992 plane_config->rotation = DRM_MODE_ROTATE_180; 8993 } 8994 8995 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && 8996 val & DISPPLANE_MIRROR) 8997 plane_config->rotation |= DRM_MODE_REFLECT_X; 8998 8999 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 9000 fourcc = i9xx_format_to_fourcc(pixel_format); 9001 fb->format = drm_format_info(fourcc); 9002 9003 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 9004 offset = I915_READ(DSPOFFSET(i9xx_plane)); 9005 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 9006 } else if (INTEL_GEN(dev_priv) >= 4) { 9007 if (plane_config->tiling) 9008 offset = I915_READ(DSPTILEOFF(i9xx_plane)); 9009 else 9010 offset = I915_READ(DSPLINOFF(i9xx_plane)); 9011 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 9012 } else { 9013 base = I915_READ(DSPADDR(i9xx_plane)); 9014 } 9015 plane_config->base = base; 9016 9017 val = I915_READ(PIPESRC(pipe)); 9018 fb->width = ((val >> 16) & 0xfff) + 1; 9019 fb->height = ((val >> 0) & 0xfff) + 1; 9020 9021 val = I915_READ(DSPSTRIDE(i9xx_plane)); 9022 fb->pitches[0] = val & 0xffffffc0; 9023 9024 aligned_height = intel_fb_align_height(fb, 0, fb->height); 9025 9026 plane_config->size = 
fb->pitches[0] * aligned_height; 9027 9028 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 9029 crtc->base.name, plane->base.name, fb->width, fb->height, 9030 fb->format->cpp[0] * 8, base, fb->pitches[0], 9031 plane_config->size); 9032 9033 plane_config->fb = intel_fb; 9034 } 9035 9036 static void chv_crtc_clock_get(struct intel_crtc *crtc, 9037 struct intel_crtc_state *pipe_config) 9038 { 9039 struct drm_device *dev = crtc->base.dev; 9040 struct drm_i915_private *dev_priv = to_i915(dev); 9041 enum pipe pipe = crtc->pipe; 9042 enum dpio_channel port = vlv_pipe_to_channel(pipe); 9043 struct dpll clock; 9044 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 9045 int refclk = 100000; 9046 9047 /* In case of DSI, DPLL will not be used */ 9048 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 9049 return; 9050 9051 vlv_dpio_get(dev_priv); 9052 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 9053 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 9054 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 9055 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 9056 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 9057 vlv_dpio_put(dev_priv); 9058 9059 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 9060 clock.m2 = (pll_dw0 & 0xff) << 22; 9061 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 9062 clock.m2 |= pll_dw2 & 0x3fffff; 9063 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 9064 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 9065 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 9066 9067 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 9068 } 9069 9070 static enum intel_output_format 9071 bdw_get_pipemisc_output_format(struct intel_crtc *crtc) 9072 { 9073 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9074 u32 tmp; 9075 9076 tmp = I915_READ(PIPEMISC(crtc->pipe)); 9077 9078 if (tmp & PIPEMISC_YUV420_ENABLE) { 9079 /* We support 4:2:0 in full blend mode only */ 9080 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); 9081 9082 return INTEL_OUTPUT_FORMAT_YCBCR420; 9083 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { 9084 return INTEL_OUTPUT_FORMAT_YCBCR444; 9085 } else { 9086 return INTEL_OUTPUT_FORMAT_RGB; 9087 } 9088 } 9089 9090 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) 9091 { 9092 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9093 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 9094 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9095 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 9096 u32 tmp; 9097 9098 tmp = I915_READ(DSPCNTR(i9xx_plane)); 9099 9100 if (tmp & DISPPLANE_GAMMA_ENABLE) 9101 crtc_state->gamma_enable = true; 9102 9103 if (!HAS_GMCH(dev_priv) && 9104 tmp & DISPPLANE_PIPE_CSC_ENABLE) 9105 crtc_state->csc_enable = true; 9106 } 9107 9108 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 9109 struct intel_crtc_state *pipe_config) 9110 { 9111 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9112 enum intel_display_power_domain power_domain; 9113 intel_wakeref_t wakeref; 9114 u32 tmp; 9115 bool ret; 9116 9117 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9118 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 9119 if (!wakeref) 9120 return false; 9121 9122 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 9123 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9124 pipe_config->shared_dpll = NULL; 9125 
pipe_config->master_transcoder = INVALID_TRANSCODER; 9126 9127 ret = false; 9128 9129 tmp = I915_READ(PIPECONF(crtc->pipe)); 9130 if (!(tmp & PIPECONF_ENABLE)) 9131 goto out; 9132 9133 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 9134 IS_CHERRYVIEW(dev_priv)) { 9135 switch (tmp & PIPECONF_BPC_MASK) { 9136 case PIPECONF_6BPC: 9137 pipe_config->pipe_bpp = 18; 9138 break; 9139 case PIPECONF_8BPC: 9140 pipe_config->pipe_bpp = 24; 9141 break; 9142 case PIPECONF_10BPC: 9143 pipe_config->pipe_bpp = 30; 9144 break; 9145 default: 9146 break; 9147 } 9148 } 9149 9150 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 9151 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 9152 pipe_config->limited_color_range = true; 9153 9154 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> 9155 PIPECONF_GAMMA_MODE_SHIFT; 9156 9157 if (IS_CHERRYVIEW(dev_priv)) 9158 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe)); 9159 9160 i9xx_get_pipe_color_config(pipe_config); 9161 intel_color_get_config(pipe_config); 9162 9163 if (INTEL_GEN(dev_priv) < 4) 9164 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 9165 9166 intel_get_pipe_timings(crtc, pipe_config); 9167 intel_get_pipe_src_size(crtc, pipe_config); 9168 9169 i9xx_get_pfit_config(crtc, pipe_config); 9170 9171 if (INTEL_GEN(dev_priv) >= 4) { 9172 /* No way to read it out on pipes B and C */ 9173 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 9174 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 9175 else 9176 tmp = I915_READ(DPLL_MD(crtc->pipe)); 9177 pipe_config->pixel_multiplier = 9178 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 9179 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 9180 pipe_config->dpll_hw_state.dpll_md = tmp; 9181 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 9182 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 9183 tmp = I915_READ(DPLL(crtc->pipe)); 9184 pipe_config->pixel_multiplier = 9185 ((tmp & SDVO_MULTIPLIER_MASK) 9186 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 9187 } else { 9188 /* Note that on i915G/GM the pixel multiplier is in the sdvo 9189 * port and will be fixed up in the encoder->get_config 9190 * function. */ 9191 pipe_config->pixel_multiplier = 1; 9192 } 9193 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 9194 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 9195 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 9196 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 9197 } else { 9198 /* Mask out read-only status bits. */ 9199 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 9200 DPLL_PORTC_READY_MASK | 9201 DPLL_PORTB_READY_MASK); 9202 } 9203 9204 if (IS_CHERRYVIEW(dev_priv)) 9205 chv_crtc_clock_get(crtc, pipe_config); 9206 else if (IS_VALLEYVIEW(dev_priv)) 9207 vlv_crtc_clock_get(crtc, pipe_config); 9208 else 9209 i9xx_crtc_clock_get(crtc, pipe_config); 9210 9211 /* 9212 * Normally the dotclock is filled in by the encoder .get_config() 9213 * but in case the pipe is enabled w/o any ports we need a sane 9214 * default. 
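 *
 * For example (illustrative numbers only): with port_clock = 200000 kHz
 * and pixel_multiplier = 2 the readout below yields
 * crtc_clock = 200000 / 2 = 100000 kHz.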
9215 */ 9216 pipe_config->hw.adjusted_mode.crtc_clock = 9217 pipe_config->port_clock / pipe_config->pixel_multiplier; 9218 9219 ret = true; 9220 9221 out: 9222 intel_display_power_put(dev_priv, power_domain, wakeref); 9223 9224 return ret; 9225 } 9226 9227 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) 9228 { 9229 struct intel_encoder *encoder; 9230 int i; 9231 u32 val, final; 9232 bool has_lvds = false; 9233 bool has_cpu_edp = false; 9234 bool has_panel = false; 9235 bool has_ck505 = false; 9236 bool can_ssc = false; 9237 bool using_ssc_source = false; 9238 9239 /* We need to take the global config into account */ 9240 for_each_intel_encoder(&dev_priv->drm, encoder) { 9241 switch (encoder->type) { 9242 case INTEL_OUTPUT_LVDS: 9243 has_panel = true; 9244 has_lvds = true; 9245 break; 9246 case INTEL_OUTPUT_EDP: 9247 has_panel = true; 9248 if (encoder->port == PORT_A) 9249 has_cpu_edp = true; 9250 break; 9251 default: 9252 break; 9253 } 9254 } 9255 9256 if (HAS_PCH_IBX(dev_priv)) { 9257 has_ck505 = dev_priv->vbt.display_clock_mode; 9258 can_ssc = has_ck505; 9259 } else { 9260 has_ck505 = false; 9261 can_ssc = true; 9262 } 9263 9264 /* Check if any DPLLs are using the SSC source */ 9265 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 9266 u32 temp = I915_READ(PCH_DPLL(i)); 9267 9268 if (!(temp & DPLL_VCO_ENABLE)) 9269 continue; 9270 9271 if ((temp & PLL_REF_INPUT_MASK) == 9272 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 9273 using_ssc_source = true; 9274 break; 9275 } 9276 } 9277 9278 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 9279 has_panel, has_lvds, has_ck505, using_ssc_source); 9280 9281 /* Ironlake: try to set up the display ref clock before DPLL 9282 * enabling. This is only under the driver's control after the 9283 * PCH B stepping; earlier chipset steppings should 9284 * ignore this setting. 9285 */ 9286 val = I915_READ(PCH_DREF_CONTROL); 9287 9288 /* As we must carefully and slowly disable/enable each source in turn, 9289 * compute the final state we want first and check if we need to 9290 * make any changes at all.
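 *
 * In other words, the rest of the function is a compute-then-commit
 * idiom, roughly (a sketch of the code below, not a separate helper):
 *
 *	final = <desired DREF_* bits from has_panel/has_ck505/...>;
 *	if (final == val)
 *		return;		(hardware already matches)
 *	<step val towards final one clock source at a time, with a
 *	 PCH_DREF_CONTROL write + POSTING_READ + udelay(200) per step>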
9291 */ 9292 final = val; 9293 final &= ~DREF_NONSPREAD_SOURCE_MASK; 9294 if (has_ck505) 9295 final |= DREF_NONSPREAD_CK505_ENABLE; 9296 else 9297 final |= DREF_NONSPREAD_SOURCE_ENABLE; 9298 9299 final &= ~DREF_SSC_SOURCE_MASK; 9300 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9301 final &= ~DREF_SSC1_ENABLE; 9302 9303 if (has_panel) { 9304 final |= DREF_SSC_SOURCE_ENABLE; 9305 9306 if (intel_panel_use_ssc(dev_priv) && can_ssc) 9307 final |= DREF_SSC1_ENABLE; 9308 9309 if (has_cpu_edp) { 9310 if (intel_panel_use_ssc(dev_priv) && can_ssc) 9311 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9312 else 9313 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9314 } else 9315 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9316 } else if (using_ssc_source) { 9317 final |= DREF_SSC_SOURCE_ENABLE; 9318 final |= DREF_SSC1_ENABLE; 9319 } 9320 9321 if (final == val) 9322 return; 9323 9324 /* Always enable nonspread source */ 9325 val &= ~DREF_NONSPREAD_SOURCE_MASK; 9326 9327 if (has_ck505) 9328 val |= DREF_NONSPREAD_CK505_ENABLE; 9329 else 9330 val |= DREF_NONSPREAD_SOURCE_ENABLE; 9331 9332 if (has_panel) { 9333 val &= ~DREF_SSC_SOURCE_MASK; 9334 val |= DREF_SSC_SOURCE_ENABLE; 9335 9336 /* SSC must be turned on before enabling the CPU output */ 9337 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9338 DRM_DEBUG_KMS("Using SSC on panel\n"); 9339 val |= DREF_SSC1_ENABLE; 9340 } else 9341 val &= ~DREF_SSC1_ENABLE; 9342 9343 /* Get SSC going before enabling the outputs */ 9344 I915_WRITE(PCH_DREF_CONTROL, val); 9345 POSTING_READ(PCH_DREF_CONTROL); 9346 udelay(200); 9347 9348 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9349 9350 /* Enable CPU source on CPU attached eDP */ 9351 if (has_cpu_edp) { 9352 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9353 DRM_DEBUG_KMS("Using SSC on eDP\n"); 9354 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9355 } else 9356 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9357 } else 9358 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9359 9360 I915_WRITE(PCH_DREF_CONTROL, val); 9361 POSTING_READ(PCH_DREF_CONTROL); 9362 udelay(200); 9363 } else { 9364 DRM_DEBUG_KMS("Disabling CPU source output\n"); 9365 9366 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9367 9368 /* Turn off CPU output */ 9369 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9370 9371 I915_WRITE(PCH_DREF_CONTROL, val); 9372 POSTING_READ(PCH_DREF_CONTROL); 9373 udelay(200); 9374 9375 if (!using_ssc_source) { 9376 DRM_DEBUG_KMS("Disabling SSC source\n"); 9377 9378 /* Turn off the SSC source */ 9379 val &= ~DREF_SSC_SOURCE_MASK; 9380 val |= DREF_SSC_SOURCE_DISABLE; 9381 9382 /* Turn off SSC1 */ 9383 val &= ~DREF_SSC1_ENABLE; 9384 9385 I915_WRITE(PCH_DREF_CONTROL, val); 9386 POSTING_READ(PCH_DREF_CONTROL); 9387 udelay(200); 9388 } 9389 } 9390 9391 BUG_ON(val != final); 9392 } 9393 9394 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 9395 { 9396 u32 tmp; 9397 9398 tmp = I915_READ(SOUTH_CHICKEN2); 9399 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 9400 I915_WRITE(SOUTH_CHICKEN2, tmp); 9401 9402 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & 9403 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 9404 DRM_ERROR("FDI mPHY reset assert timeout\n"); 9405 9406 tmp = I915_READ(SOUTH_CHICKEN2); 9407 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 9408 I915_WRITE(SOUTH_CHICKEN2, tmp); 9409 9410 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & 9411 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 9412 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 9413 } 9414 9415 /* WaMPhyProgramming:hsw */ 9416 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 9417 { 9418 u32 tmp; 9419 9420 tmp = intel_sbi_read(dev_priv, 
0x8008, SBI_MPHY); 9421 tmp &= ~(0xFF << 24); 9422 tmp |= (0x12 << 24); 9423 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 9424 9425 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 9426 tmp |= (1 << 11); 9427 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 9428 9429 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 9430 tmp |= (1 << 11); 9431 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 9432 9433 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 9434 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9435 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 9436 9437 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 9438 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9439 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 9440 9441 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 9442 tmp &= ~(7 << 13); 9443 tmp |= (5 << 13); 9444 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 9445 9446 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 9447 tmp &= ~(7 << 13); 9448 tmp |= (5 << 13); 9449 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 9450 9451 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 9452 tmp &= ~0xFF; 9453 tmp |= 0x1C; 9454 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 9455 9456 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 9457 tmp &= ~0xFF; 9458 tmp |= 0x1C; 9459 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 9460 9461 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 9462 tmp &= ~(0xFF << 16); 9463 tmp |= (0x1C << 16); 9464 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 9465 9466 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 9467 tmp &= ~(0xFF << 16); 9468 tmp |= (0x1C << 16); 9469 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 9470 9471 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 9472 tmp |= (1 << 27); 9473 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 9474 9475 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 9476 tmp |= (1 << 27); 9477 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 9478 9479 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 9480 tmp &= ~(0xF << 28); 9481 tmp |= (4 << 28); 9482 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 9483 9484 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 9485 tmp &= ~(0xF << 28); 9486 tmp |= (4 << 28); 9487 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 9488 } 9489 9490 /* Implements 3 different sequences from BSpec chapter "Display iCLK 9491 * Programming" based on the parameters passed: 9492 * - Sequence to enable CLKOUT_DP 9493 * - Sequence to enable CLKOUT_DP without spread 9494 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 9495 */ 9496 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 9497 bool with_spread, bool with_fdi) 9498 { 9499 u32 reg, tmp; 9500 9501 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 9502 with_spread = true; 9503 if (WARN(HAS_PCH_LPT_LP(dev_priv) && 9504 with_fdi, "LP PCH doesn't have FDI\n")) 9505 with_fdi = false; 9506 9507 mutex_lock(&dev_priv->sb_lock); 9508 9509 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9510 tmp &= ~SBI_SSCCTL_DISABLE; 9511 tmp |= SBI_SSCCTL_PATHALT; 9512 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9513 9514 udelay(24); 9515 9516 if (with_spread) { 9517 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9518 tmp &= ~SBI_SSCCTL_PATHALT; 9519 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9520 9521 if (with_fdi) { 9522 lpt_reset_fdi_mphy(dev_priv); 9523 lpt_program_fdi_mphy(dev_priv); 9524 } 9525 } 9526 9527 reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; 9528 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9529 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9530 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9531 9532 mutex_unlock(&dev_priv->sb_lock); 9533 } 9534 9535 /* Sequence to disable CLKOUT_DP */ 9536 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 9537 { 9538 u32 reg, tmp; 9539 9540 mutex_lock(&dev_priv->sb_lock); 9541 9542 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 9543 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9544 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9545 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9546 9547 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9548 if (!(tmp & SBI_SSCCTL_DISABLE)) { 9549 if (!(tmp & SBI_SSCCTL_PATHALT)) { 9550 tmp |= SBI_SSCCTL_PATHALT; 9551 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9552 udelay(32); 9553 } 9554 tmp |= SBI_SSCCTL_DISABLE; 9555 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9556 } 9557 9558 mutex_unlock(&dev_priv->sb_lock); 9559 } 9560 9561 #define BEND_IDX(steps) ((50 + (steps)) / 5) 9562 9563 static const u16 sscdivintphase[] = { 9564 [BEND_IDX( 50)] = 0x3B23, 9565 [BEND_IDX( 45)] = 0x3B23, 9566 [BEND_IDX( 40)] = 0x3C23, 9567 [BEND_IDX( 35)] = 0x3C23, 9568 [BEND_IDX( 30)] = 0x3D23, 9569 [BEND_IDX( 25)] = 0x3D23, 9570 [BEND_IDX( 20)] = 0x3E23, 9571 [BEND_IDX( 15)] = 0x3E23, 9572 [BEND_IDX( 10)] = 0x3F23, 9573 [BEND_IDX( 5)] = 0x3F23, 9574 [BEND_IDX( 0)] = 0x0025, 9575 [BEND_IDX( -5)] = 0x0025, 9576 [BEND_IDX(-10)] = 0x0125, 9577 [BEND_IDX(-15)] = 0x0125, 9578 [BEND_IDX(-20)] = 0x0225, 9579 [BEND_IDX(-25)] = 0x0225, 9580 [BEND_IDX(-30)] = 0x0325, 9581 [BEND_IDX(-35)] = 0x0325, 9582 [BEND_IDX(-40)] = 0x0425, 9583 [BEND_IDX(-45)] = 0x0425, 9584 [BEND_IDX(-50)] = 0x0525, 9585 }; 9586 9587 /* 9588 * Bend CLKOUT_DP 9589 * steps -50 to 50 inclusive, in steps of 5 9590 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 9591 * change in clock period = -(steps / 10) * 5.787 ps 9592 */ 9593 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 9594 { 9595 u32 tmp; 9596 int idx = BEND_IDX(steps); 9597 9598 if (WARN_ON(steps % 5 != 0)) 9599 return; 9600 9601 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) 9602 return; 9603 9604 mutex_lock(&dev_priv->sb_lock); 9605 9606 if (steps % 10 != 0) 9607 tmp = 0xAAAAAAAB; 9608 else 9609 tmp = 0x00000000; 9610 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 9611 9612 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 9613 tmp &= 0xffff0000; 9614 tmp |= sscdivintphase[idx]; 9615 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 9616 9617 mutex_unlock(&dev_priv->sb_lock); 9618 } 9619 9620 #undef BEND_IDX 9621 9622 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) 9623 { 9624 u32 fuse_strap = I915_READ(FUSE_STRAP); 9625 u32 ctl = I915_READ(SPLL_CTL); 9626 9627 if ((ctl & SPLL_PLL_ENABLE) == 0) 9628 return false; 9629 9630 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && 9631 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9632 return true; 9633 9634 if (IS_BROADWELL(dev_priv) && 9635 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) 9636 return true; 9637 9638 return false; 9639 } 9640 9641 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, 9642 enum intel_dpll_id id) 9643 { 9644 u32 fuse_strap = I915_READ(FUSE_STRAP); 9645 u32 ctl = I915_READ(WRPLL_CTL(id)); 9646 9647 if ((ctl & WRPLL_PLL_ENABLE) == 0) 9648 return false; 9649 9650 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) 9651 
return true; 9652 9653 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && 9654 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && 9655 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9656 return true; 9657 9658 return false; 9659 } 9660 9661 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 9662 { 9663 struct intel_encoder *encoder; 9664 bool has_fdi = false; 9665 9666 for_each_intel_encoder(&dev_priv->drm, encoder) { 9667 switch (encoder->type) { 9668 case INTEL_OUTPUT_ANALOG: 9669 has_fdi = true; 9670 break; 9671 default: 9672 break; 9673 } 9674 } 9675 9676 /* 9677 * The BIOS may have decided to use the PCH SSC 9678 * reference so we must not disable it until the 9679 * relevant PLLs have stopped relying on it. We'll 9680 * just leave the PCH SSC reference enabled in case 9681 * any active PLL is using it. It will get disabled 9682 * after runtime suspend if we don't have FDI. 9683 * 9684 * TODO: Move the whole reference clock handling 9685 * to the modeset sequence proper so that we can 9686 * actually enable/disable/reconfigure these things 9687 * safely. To do that we need to introduce a real 9688 * clock hierarchy. That would also allow us to do 9689 * clock bending finally. 9690 */ 9691 dev_priv->pch_ssc_use = 0; 9692 9693 if (spll_uses_pch_ssc(dev_priv)) { 9694 DRM_DEBUG_KMS("SPLL using PCH SSC\n"); 9695 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); 9696 } 9697 9698 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { 9699 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n"); 9700 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); 9701 } 9702 9703 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { 9704 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n"); 9705 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); 9706 } 9707 9708 if (dev_priv->pch_ssc_use) 9709 return; 9710 9711 if (has_fdi) { 9712 lpt_bend_clkout_dp(dev_priv, 0); 9713 lpt_enable_clkout_dp(dev_priv, true, true); 9714 } else { 9715 lpt_disable_clkout_dp(dev_priv); 9716 } 9717 } 9718 9719 /* 9720 * Initialize reference clocks when the driver loads 9721 */ 9722 void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 9723 { 9724 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 9725 ironlake_init_pch_refclk(dev_priv); 9726 else if (HAS_PCH_LPT(dev_priv)) 9727 lpt_init_pch_refclk(dev_priv); 9728 } 9729 9730 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) 9731 { 9732 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9733 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9734 enum pipe pipe = crtc->pipe; 9735 u32 val; 9736 9737 val = 0; 9738 9739 switch (crtc_state->pipe_bpp) { 9740 case 18: 9741 val |= PIPECONF_6BPC; 9742 break; 9743 case 24: 9744 val |= PIPECONF_8BPC; 9745 break; 9746 case 30: 9747 val |= PIPECONF_10BPC; 9748 break; 9749 case 36: 9750 val |= PIPECONF_12BPC; 9751 break; 9752 default: 9753 /* Case prevented by intel_choose_pipe_bpp_dither. */ 9754 BUG(); 9755 } 9756 9757 if (crtc_state->dither) 9758 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9759 9760 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9761 val |= PIPECONF_INTERLACED_ILK; 9762 else 9763 val |= PIPECONF_PROGRESSIVE; 9764 9765 /* 9766 * This would end up with an odd purple hue over 9767 * the entire display. Make sure we don't do it. 
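 *
 * (PIPECONF_COLOR_RANGE_SELECT applies the RGB limited-range
 * compression; pairing it with a YCbCr output format would scale the
 * already limited-range YCbCr data a second time, hence the WARN
 * below.)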
9768 */ 9769 WARN_ON(crtc_state->limited_color_range && 9770 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 9771 9772 if (crtc_state->limited_color_range) 9773 val |= PIPECONF_COLOR_RANGE_SELECT; 9774 9775 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 9776 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; 9777 9778 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 9779 9780 val |= PIPECONF_FRAME_START_DELAY(0); 9781 9782 I915_WRITE(PIPECONF(pipe), val); 9783 POSTING_READ(PIPECONF(pipe)); 9784 } 9785 9786 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) 9787 { 9788 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9789 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9790 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 9791 u32 val = 0; 9792 9793 if (IS_HASWELL(dev_priv) && crtc_state->dither) 9794 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9795 9796 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9797 val |= PIPECONF_INTERLACED_ILK; 9798 else 9799 val |= PIPECONF_PROGRESSIVE; 9800 9801 if (IS_HASWELL(dev_priv) && 9802 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 9803 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; 9804 9805 I915_WRITE(PIPECONF(cpu_transcoder), val); 9806 POSTING_READ(PIPECONF(cpu_transcoder)); 9807 } 9808 9809 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) 9810 { 9811 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9812 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9813 u32 val = 0; 9814 9815 switch (crtc_state->pipe_bpp) { 9816 case 18: 9817 val |= PIPEMISC_DITHER_6_BPC; 9818 break; 9819 case 24: 9820 val |= PIPEMISC_DITHER_8_BPC; 9821 break; 9822 case 30: 9823 val |= PIPEMISC_DITHER_10_BPC; 9824 break; 9825 case 36: 9826 val |= PIPEMISC_DITHER_12_BPC; 9827 break; 9828 default: 9829 MISSING_CASE(crtc_state->pipe_bpp); 9830 break; 9831 } 9832 9833 if (crtc_state->dither) 9834 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 9835 9836 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 9837 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 9838 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; 9839 9840 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 9841 val |= PIPEMISC_YUV420_ENABLE | 9842 PIPEMISC_YUV420_MODE_FULL_BLEND; 9843 9844 if (INTEL_GEN(dev_priv) >= 11 && 9845 (crtc_state->active_planes & ~(icl_hdr_plane_mask() | 9846 BIT(PLANE_CURSOR))) == 0) 9847 val |= PIPEMISC_HDR_MODE_PRECISION; 9848 9849 I915_WRITE(PIPEMISC(crtc->pipe), val); 9850 } 9851 9852 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) 9853 { 9854 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9855 u32 tmp; 9856 9857 tmp = I915_READ(PIPEMISC(crtc->pipe)); 9858 9859 switch (tmp & PIPEMISC_DITHER_BPC_MASK) { 9860 case PIPEMISC_DITHER_6_BPC: 9861 return 18; 9862 case PIPEMISC_DITHER_8_BPC: 9863 return 24; 9864 case PIPEMISC_DITHER_10_BPC: 9865 return 30; 9866 case PIPEMISC_DITHER_12_BPC: 9867 return 36; 9868 default: 9869 MISSING_CASE(tmp); 9870 return 0; 9871 } 9872 } 9873 9874 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 9875 { 9876 /* 9877 * Account for spread spectrum to avoid 9878 * oversubscribing the link. Max center spread 9879 * is 2.5%; use 5% for safety's sake. 
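 *
 * Worked example (illustrative numbers only): target_clock = 148500
 * kHz at bpp = 24 on a link_bw = 270000 kHz FDI link gives
 * bps = 148500 * 24 * 21 / 20 = 3742200, while each lane carries
 * link_bw * 8 = 2160000, so DIV_ROUND_UP() returns 2 lanes.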
9880 */ 9881 u32 bps = target_clock * bpp * 21 / 20; 9882 return DIV_ROUND_UP(bps, link_bw * 8); 9883 } 9884 9885 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) 9886 { 9887 return i9xx_dpll_compute_m(dpll) < factor * dpll->n; 9888 } 9889 9890 static void ironlake_compute_dpll(struct intel_crtc *crtc, 9891 struct intel_crtc_state *crtc_state, 9892 struct dpll *reduced_clock) 9893 { 9894 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9895 u32 dpll, fp, fp2; 9896 int factor; 9897 9898 /* Enable autotuning of the PLL clock (if permissible) */ 9899 factor = 21; 9900 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9901 if ((intel_panel_use_ssc(dev_priv) && 9902 dev_priv->vbt.lvds_ssc_freq == 100000) || 9903 (HAS_PCH_IBX(dev_priv) && 9904 intel_is_dual_link_lvds(dev_priv))) 9905 factor = 25; 9906 } else if (crtc_state->sdvo_tv_clock) { 9907 factor = 20; 9908 } 9909 9910 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 9911 9912 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor)) 9913 fp |= FP_CB_TUNE; 9914 9915 if (reduced_clock) { 9916 fp2 = i9xx_dpll_compute_fp(reduced_clock); 9917 9918 if (reduced_clock->m < factor * reduced_clock->n) 9919 fp2 |= FP_CB_TUNE; 9920 } else { 9921 fp2 = fp; 9922 } 9923 9924 dpll = 0; 9925 9926 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 9927 dpll |= DPLLB_MODE_LVDS; 9928 else 9929 dpll |= DPLLB_MODE_DAC_SERIAL; 9930 9931 dpll |= (crtc_state->pixel_multiplier - 1) 9932 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 9933 9934 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 9935 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 9936 dpll |= DPLL_SDVO_HIGH_SPEED; 9937 9938 if (intel_crtc_has_dp_encoder(crtc_state)) 9939 dpll |= DPLL_SDVO_HIGH_SPEED; 9940 9941 /* 9942 * The high speed IO clock is only really required for 9943 * SDVO/HDMI/DP, but we also enable it for CRT to make it 9944 * possible to share the DPLL between CRT and HDMI. Enabling 9945 * the clock needlessly does no real harm, except use up a 9946 * bit of power potentially. 9947 * 9948 * We'll limit this to IVB with 3 pipes, since it has only two 9949 * DPLLs and so DPLL sharing is the only way to get three pipes 9950 * driving PCH ports at the same time. On SNB we could do this, 9951 * and potentially avoid enabling the second DPLL, but it's not 9952 * clear if it's a win or loss power-wise. No point in doing 9953 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9954 */ 9955 if (INTEL_NUM_PIPES(dev_priv) == 3 && 9956 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 9957 dpll |= DPLL_SDVO_HIGH_SPEED; 9958 9959 /* compute bitmask from p1 value */ 9960 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 9961 /* also FPA1 */ 9962 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 9963 9964 switch (crtc_state->dpll.p2) { 9965 case 5: 9966 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 9967 break; 9968 case 7: 9969 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 9970 break; 9971 case 10: 9972 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 9973 break; 9974 case 14: 9975 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 9976 break; 9977 } 9978 9979 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 9980 intel_panel_use_ssc(dev_priv)) 9981 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 9982 else 9983 dpll |= PLL_REF_INPUT_DREFCLK; 9984 9985 dpll |= DPLL_VCO_ENABLE; 9986 9987 crtc_state->dpll_hw_state.dpll = dpll; 9988 crtc_state->dpll_hw_state.fp0 = fp; 9989 crtc_state->dpll_hw_state.fp1 = fp2; 9990 } 9991 9992 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, 9993 struct intel_crtc_state *crtc_state) 9994 { 9995 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9996 struct intel_atomic_state *state = 9997 to_intel_atomic_state(crtc_state->uapi.state); 9998 const struct intel_limit *limit; 9999 int refclk = 120000; 10000 10001 memset(&crtc_state->dpll_hw_state, 0, 10002 sizeof(crtc_state->dpll_hw_state)); 10003 10004 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 10005 if (!crtc_state->has_pch_encoder) 10006 return 0; 10007 10008 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 10009 if (intel_panel_use_ssc(dev_priv)) { 10010 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 10011 dev_priv->vbt.lvds_ssc_freq); 10012 refclk = dev_priv->vbt.lvds_ssc_freq; 10013 } 10014 10015 if (intel_is_dual_link_lvds(dev_priv)) { 10016 if (refclk == 100000) 10017 limit = &intel_limits_ironlake_dual_lvds_100m; 10018 else 10019 limit = &intel_limits_ironlake_dual_lvds; 10020 } else { 10021 if (refclk == 100000) 10022 limit = &intel_limits_ironlake_single_lvds_100m; 10023 else 10024 limit = &intel_limits_ironlake_single_lvds; 10025 } 10026 } else { 10027 limit = &intel_limits_ironlake_dac; 10028 } 10029 10030 if (!crtc_state->clock_set && 10031 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 10032 refclk, NULL, &crtc_state->dpll)) { 10033 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 10034 return -EINVAL; 10035 } 10036 10037 ironlake_compute_dpll(crtc, crtc_state, NULL); 10038 10039 if (!intel_reserve_shared_dplls(state, crtc, NULL)) { 10040 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", 10041 pipe_name(crtc->pipe)); 10042 return -EINVAL; 10043 } 10044 10045 return 0; 10046 } 10047 10048 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 10049 struct intel_link_m_n *m_n) 10050 { 10051 struct drm_device *dev = crtc->base.dev; 10052 struct drm_i915_private *dev_priv = to_i915(dev); 10053 enum pipe pipe = crtc->pipe; 10054 10055 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 10056 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 10057 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 10058 & ~TU_SIZE_MASK; 10059 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 10060 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 10061 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10062 } 10063 10064 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 10065 enum transcoder 
transcoder, 10066 struct intel_link_m_n *m_n, 10067 struct intel_link_m_n *m2_n2) 10068 { 10069 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10070 enum pipe pipe = crtc->pipe; 10071 10072 if (INTEL_GEN(dev_priv) >= 5) { 10073 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); 10074 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); 10075 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 10076 & ~TU_SIZE_MASK; 10077 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 10078 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 10079 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10080 10081 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { 10082 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 10083 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 10084 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 10085 & ~TU_SIZE_MASK; 10086 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 10087 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 10088 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10089 } 10090 } else { 10091 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 10092 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 10093 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 10094 & ~TU_SIZE_MASK; 10095 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 10096 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 10097 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10098 } 10099 } 10100 10101 void intel_dp_get_m_n(struct intel_crtc *crtc, 10102 struct intel_crtc_state *pipe_config) 10103 { 10104 if (pipe_config->has_pch_encoder) 10105 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 10106 else 10107 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 10108 &pipe_config->dp_m_n, 10109 &pipe_config->dp_m2_n2); 10110 } 10111 10112 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 10113 struct intel_crtc_state *pipe_config) 10114 { 10115 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 10116 &pipe_config->fdi_m_n, NULL); 10117 } 10118 10119 static void skylake_get_pfit_config(struct intel_crtc *crtc, 10120 struct intel_crtc_state *pipe_config) 10121 { 10122 struct drm_device *dev = crtc->base.dev; 10123 struct drm_i915_private *dev_priv = to_i915(dev); 10124 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 10125 u32 ps_ctrl = 0; 10126 int id = -1; 10127 int i; 10128 10129 /* find scaler attached to this pipe */ 10130 for (i = 0; i < crtc->num_scalers; i++) { 10131 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 10132 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 10133 id = i; 10134 pipe_config->pch_pfit.enabled = true; 10135 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); 10136 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); 10137 scaler_state->scalers[i].in_use = true; 10138 break; 10139 } 10140 } 10141 10142 scaler_state->scaler_id = id; 10143 if (id >= 0) { 10144 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 10145 } else { 10146 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 10147 } 10148 } 10149 10150 static void 10151 skylake_get_initial_plane_config(struct intel_crtc *crtc, 10152 struct intel_initial_plane_config *plane_config) 10153 { 10154 struct drm_device *dev = crtc->base.dev; 10155 struct drm_i915_private *dev_priv = to_i915(dev); 10156 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 10157 enum plane_id plane_id = plane->id; 10158 enum pipe pipe; 10159 u32 val, base, offset, stride_mult, tiling, alpha; 10160 int fourcc, 
pixel_format; 10161 unsigned int aligned_height; 10162 struct drm_framebuffer *fb; 10163 struct intel_framebuffer *intel_fb; 10164 10165 if (!plane->get_hw_state(plane, &pipe)) 10166 return; 10167 10168 WARN_ON(pipe != crtc->pipe); 10169 10170 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 10171 if (!intel_fb) { 10172 DRM_DEBUG_KMS("failed to alloc fb\n"); 10173 return; 10174 } 10175 10176 fb = &intel_fb->base; 10177 10178 fb->dev = dev; 10179 10180 val = I915_READ(PLANE_CTL(pipe, plane_id)); 10181 10182 if (INTEL_GEN(dev_priv) >= 11) 10183 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK; 10184 else 10185 pixel_format = val & PLANE_CTL_FORMAT_MASK; 10186 10187 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { 10188 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id)); 10189 alpha &= PLANE_COLOR_ALPHA_MASK; 10190 } else { 10191 alpha = val & PLANE_CTL_ALPHA_MASK; 10192 } 10193 10194 fourcc = skl_format_to_fourcc(pixel_format, 10195 val & PLANE_CTL_ORDER_RGBX, alpha); 10196 fb->format = drm_format_info(fourcc); 10197 10198 tiling = val & PLANE_CTL_TILED_MASK; 10199 switch (tiling) { 10200 case PLANE_CTL_TILED_LINEAR: 10201 fb->modifier = DRM_FORMAT_MOD_LINEAR; 10202 break; 10203 case PLANE_CTL_TILED_X: 10204 plane_config->tiling = I915_TILING_X; 10205 fb->modifier = I915_FORMAT_MOD_X_TILED; 10206 break; 10207 case PLANE_CTL_TILED_Y: 10208 plane_config->tiling = I915_TILING_Y; 10209 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) 10210 fb->modifier = INTEL_GEN(dev_priv) >= 12 ? 10211 I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS : 10212 I915_FORMAT_MOD_Y_TILED_CCS; 10213 else 10214 fb->modifier = I915_FORMAT_MOD_Y_TILED; 10215 break; 10216 case PLANE_CTL_TILED_YF: 10217 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) 10218 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; 10219 else 10220 fb->modifier = I915_FORMAT_MOD_Yf_TILED; 10221 break; 10222 default: 10223 MISSING_CASE(tiling); 10224 goto error; 10225 } 10226 10227 /* 10228 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr, 10229 * while i915 HW rotation is clockwise; that's why the values are swapped here.
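 *
 * i.e. the readout below deliberately cross-maps the 90/270 cases:
 *
 *	PLANE_CTL_ROTATE_90  -> DRM_MODE_ROTATE_270
 *	PLANE_CTL_ROTATE_270 -> DRM_MODE_ROTATE_90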
10230 */ 10231 switch (val & PLANE_CTL_ROTATE_MASK) { 10232 case PLANE_CTL_ROTATE_0: 10233 plane_config->rotation = DRM_MODE_ROTATE_0; 10234 break; 10235 case PLANE_CTL_ROTATE_90: 10236 plane_config->rotation = DRM_MODE_ROTATE_270; 10237 break; 10238 case PLANE_CTL_ROTATE_180: 10239 plane_config->rotation = DRM_MODE_ROTATE_180; 10240 break; 10241 case PLANE_CTL_ROTATE_270: 10242 plane_config->rotation = DRM_MODE_ROTATE_90; 10243 break; 10244 } 10245 10246 if (INTEL_GEN(dev_priv) >= 10 && 10247 val & PLANE_CTL_FLIP_HORIZONTAL) 10248 plane_config->rotation |= DRM_MODE_REFLECT_X; 10249 10250 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; 10251 plane_config->base = base; 10252 10253 offset = I915_READ(PLANE_OFFSET(pipe, plane_id)); 10254 10255 val = I915_READ(PLANE_SIZE(pipe, plane_id)); 10256 fb->height = ((val >> 16) & 0xffff) + 1; 10257 fb->width = ((val >> 0) & 0xffff) + 1; 10258 10259 val = I915_READ(PLANE_STRIDE(pipe, plane_id)); 10260 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); 10261 fb->pitches[0] = (val & 0x3ff) * stride_mult; 10262 10263 aligned_height = intel_fb_align_height(fb, 0, fb->height); 10264 10265 plane_config->size = fb->pitches[0] * aligned_height; 10266 10267 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 10268 crtc->base.name, plane->base.name, fb->width, fb->height, 10269 fb->format->cpp[0] * 8, base, fb->pitches[0], 10270 plane_config->size); 10271 10272 plane_config->fb = intel_fb; 10273 return; 10274 10275 error: 10276 kfree(intel_fb); 10277 } 10278 10279 static void ironlake_get_pfit_config(struct intel_crtc *crtc, 10280 struct intel_crtc_state *pipe_config) 10281 { 10282 struct drm_device *dev = crtc->base.dev; 10283 struct drm_i915_private *dev_priv = to_i915(dev); 10284 u32 tmp; 10285 10286 tmp = I915_READ(PF_CTL(crtc->pipe)); 10287 10288 if (tmp & PF_ENABLE) { 10289 pipe_config->pch_pfit.enabled = true; 10290 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); 10291 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); 10292 10293 /* We currently do not free assignments of panel fitters on 10294 * ivb/hsw (since we don't use the higher upscaling modes which 10295 * differentiate them), so just WARN about this case for now.
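 *
 * (The WARN below therefore only checks that the PF_CTL pipe-select
 * field still matches the pipe being read out.)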
*/ 10296 if (IS_GEN(dev_priv, 7)) { 10297 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 10298 PF_PIPE_SEL_IVB(crtc->pipe)); 10299 } 10300 } 10301 } 10302 10303 static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 10304 struct intel_crtc_state *pipe_config) 10305 { 10306 struct drm_device *dev = crtc->base.dev; 10307 struct drm_i915_private *dev_priv = to_i915(dev); 10308 enum intel_display_power_domain power_domain; 10309 intel_wakeref_t wakeref; 10310 u32 tmp; 10311 bool ret; 10312 10313 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10314 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 10315 if (!wakeref) 10316 return false; 10317 10318 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10319 pipe_config->shared_dpll = NULL; 10320 pipe_config->master_transcoder = INVALID_TRANSCODER; 10321 10322 ret = false; 10323 tmp = I915_READ(PIPECONF(crtc->pipe)); 10324 if (!(tmp & PIPECONF_ENABLE)) 10325 goto out; 10326 10327 switch (tmp & PIPECONF_BPC_MASK) { 10328 case PIPECONF_6BPC: 10329 pipe_config->pipe_bpp = 18; 10330 break; 10331 case PIPECONF_8BPC: 10332 pipe_config->pipe_bpp = 24; 10333 break; 10334 case PIPECONF_10BPC: 10335 pipe_config->pipe_bpp = 30; 10336 break; 10337 case PIPECONF_12BPC: 10338 pipe_config->pipe_bpp = 36; 10339 break; 10340 default: 10341 break; 10342 } 10343 10344 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 10345 pipe_config->limited_color_range = true; 10346 10347 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { 10348 case PIPECONF_OUTPUT_COLORSPACE_YUV601: 10349 case PIPECONF_OUTPUT_COLORSPACE_YUV709: 10350 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 10351 break; 10352 default: 10353 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 10354 break; 10355 } 10356 10357 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> 10358 PIPECONF_GAMMA_MODE_SHIFT; 10359 10360 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); 10361 10362 i9xx_get_pipe_color_config(pipe_config); 10363 intel_color_get_config(pipe_config); 10364 10365 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 10366 struct intel_shared_dpll *pll; 10367 enum intel_dpll_id pll_id; 10368 10369 pipe_config->has_pch_encoder = true; 10370 10371 tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); 10372 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10373 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10374 10375 ironlake_get_fdi_m_n_config(crtc, pipe_config); 10376 10377 if (HAS_PCH_IBX(dev_priv)) { 10378 /* 10379 * The pipe->pch transcoder and pch transcoder->pll 10380 * mapping is fixed. 
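 *
 * i.e. on IBX pipe A always uses PCH DPLL A and pipe B PCH DPLL B,
 * while CPT can route either DPLL to any transcoder via PCH_DPLL_SEL,
 * which the else branch below reads out.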
10381 */ 10382 pll_id = (enum intel_dpll_id) crtc->pipe; 10383 } else { 10384 tmp = I915_READ(PCH_DPLL_SEL); 10385 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 10386 pll_id = DPLL_ID_PCH_PLL_B; 10387 else 10388 pll_id = DPLL_ID_PCH_PLL_A; 10389 } 10390 10391 pipe_config->shared_dpll = 10392 intel_get_shared_dpll_by_id(dev_priv, pll_id); 10393 pll = pipe_config->shared_dpll; 10394 10395 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, 10396 &pipe_config->dpll_hw_state)); 10397 10398 tmp = pipe_config->dpll_hw_state.dpll; 10399 pipe_config->pixel_multiplier = 10400 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 10401 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 10402 10403 ironlake_pch_clock_get(crtc, pipe_config); 10404 } else { 10405 pipe_config->pixel_multiplier = 1; 10406 } 10407 10408 intel_get_pipe_timings(crtc, pipe_config); 10409 intel_get_pipe_src_size(crtc, pipe_config); 10410 10411 ironlake_get_pfit_config(crtc, pipe_config); 10412 10413 ret = true; 10414 10415 out: 10416 intel_display_power_put(dev_priv, power_domain, wakeref); 10417 10418 return ret; 10419 } 10420 static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 10421 struct intel_crtc_state *crtc_state) 10422 { 10423 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10424 struct intel_atomic_state *state = 10425 to_intel_atomic_state(crtc_state->uapi.state); 10426 10427 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) || 10428 INTEL_GEN(dev_priv) >= 11) { 10429 struct intel_encoder *encoder = 10430 intel_get_crtc_new_encoder(state, crtc_state); 10431 10432 if (!intel_reserve_shared_dplls(state, crtc, encoder)) { 10433 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", 10434 pipe_name(crtc->pipe)); 10435 return -EINVAL; 10436 } 10437 } 10438 10439 return 0; 10440 } 10441 10442 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv, 10443 enum port port, 10444 struct intel_crtc_state *pipe_config) 10445 { 10446 enum intel_dpll_id id; 10447 u32 temp; 10448 10449 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); 10450 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); 10451 10452 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2)) 10453 return; 10454 10455 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10456 } 10457 10458 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, 10459 enum port port, 10460 struct intel_crtc_state *pipe_config) 10461 { 10462 enum phy phy = intel_port_to_phy(dev_priv, port); 10463 enum icl_port_dpll_id port_dpll_id; 10464 enum intel_dpll_id id; 10465 u32 temp; 10466 10467 if (intel_phy_is_combo(dev_priv, phy)) { 10468 temp = I915_READ(ICL_DPCLKA_CFGCR0) & 10469 ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); 10470 id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); 10471 port_dpll_id = ICL_PORT_DPLL_DEFAULT; 10472 } else if (intel_phy_is_tc(dev_priv, phy)) { 10473 u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; 10474 10475 if (clk_sel == DDI_CLK_SEL_MG) { 10476 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, 10477 port)); 10478 port_dpll_id = ICL_PORT_DPLL_MG_PHY; 10479 } else { 10480 WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162); 10481 id = DPLL_ID_ICL_TBTPLL; 10482 port_dpll_id = ICL_PORT_DPLL_DEFAULT; 10483 } 10484 } else { 10485 WARN(1, "Invalid port %x\n", port); 10486 return; 10487 } 10488 10489 pipe_config->icl_port_dplls[port_dpll_id].pll = 10490 intel_get_shared_dpll_by_id(dev_priv, id); 10491 10492 icl_set_active_port_dpll(pipe_config, port_dpll_id); 10493 } 10494 10495 static void
bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 10496 enum port port, 10497 struct intel_crtc_state *pipe_config) 10498 { 10499 enum intel_dpll_id id; 10500 10501 switch (port) { 10502 case PORT_A: 10503 id = DPLL_ID_SKL_DPLL0; 10504 break; 10505 case PORT_B: 10506 id = DPLL_ID_SKL_DPLL1; 10507 break; 10508 case PORT_C: 10509 id = DPLL_ID_SKL_DPLL2; 10510 break; 10511 default: 10512 DRM_ERROR("Incorrect port type\n"); 10513 return; 10514 } 10515 10516 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10517 } 10518 10519 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, 10520 enum port port, 10521 struct intel_crtc_state *pipe_config) 10522 { 10523 enum intel_dpll_id id; 10524 u32 temp; 10525 10526 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 10527 id = temp >> (port * 3 + 1); 10528 10529 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3)) 10530 return; 10531 10532 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10533 } 10534 10535 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 10536 enum port port, 10537 struct intel_crtc_state *pipe_config) 10538 { 10539 enum intel_dpll_id id; 10540 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 10541 10542 switch (ddi_pll_sel) { 10543 case PORT_CLK_SEL_WRPLL1: 10544 id = DPLL_ID_WRPLL1; 10545 break; 10546 case PORT_CLK_SEL_WRPLL2: 10547 id = DPLL_ID_WRPLL2; 10548 break; 10549 case PORT_CLK_SEL_SPLL: 10550 id = DPLL_ID_SPLL; 10551 break; 10552 case PORT_CLK_SEL_LCPLL_810: 10553 id = DPLL_ID_LCPLL_810; 10554 break; 10555 case PORT_CLK_SEL_LCPLL_1350: 10556 id = DPLL_ID_LCPLL_1350; 10557 break; 10558 case PORT_CLK_SEL_LCPLL_2700: 10559 id = DPLL_ID_LCPLL_2700; 10560 break; 10561 default: 10562 MISSING_CASE(ddi_pll_sel); 10563 /* fall through */ 10564 case PORT_CLK_SEL_NONE: 10565 return; 10566 } 10567 10568 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10569 } 10570 10571 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 10572 struct intel_crtc_state *pipe_config, 10573 u64 *power_domain_mask, 10574 intel_wakeref_t *wakerefs) 10575 { 10576 struct drm_device *dev = crtc->base.dev; 10577 struct drm_i915_private *dev_priv = to_i915(dev); 10578 enum intel_display_power_domain power_domain; 10579 unsigned long panel_transcoder_mask = 0; 10580 unsigned long enabled_panel_transcoders = 0; 10581 enum transcoder panel_transcoder; 10582 intel_wakeref_t wf; 10583 u32 tmp; 10584 10585 if (INTEL_GEN(dev_priv) >= 11) 10586 panel_transcoder_mask |= 10587 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 10588 10589 if (HAS_TRANSCODER_EDP(dev_priv)) 10590 panel_transcoder_mask |= BIT(TRANSCODER_EDP); 10591 10592 /* 10593 * The pipe->transcoder mapping is fixed with the exception of the eDP 10594 * and DSI transcoders handled below. 10595 */ 10596 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10597 10598 /* 10599 * XXX: Do intel_display_power_get_if_enabled before reading this (for 10600 * consistency and less surprising code; it's in always on power). 10601 */ 10602 for_each_set_bit(panel_transcoder, 10603 &panel_transcoder_mask, 10604 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) { 10605 bool force_thru = false; 10606 enum pipe trans_pipe; 10607 10608 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder)); 10609 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 10610 continue; 10611 10612 /* 10613 * Log all enabled ones, only use the first one. 10614 * 10615 * FIXME: This won't work for two separate DSI displays. 
10616 */ 10617 enabled_panel_transcoders |= BIT(panel_transcoder); 10618 if (enabled_panel_transcoders != BIT(panel_transcoder)) 10619 continue; 10620 10621 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 10622 default: 10623 WARN(1, "unknown pipe linked to transcoder %s\n", 10624 transcoder_name(panel_transcoder)); 10625 /* fall through */ 10626 case TRANS_DDI_EDP_INPUT_A_ONOFF: 10627 force_thru = true; 10628 /* fall through */ 10629 case TRANS_DDI_EDP_INPUT_A_ON: 10630 trans_pipe = PIPE_A; 10631 break; 10632 case TRANS_DDI_EDP_INPUT_B_ONOFF: 10633 trans_pipe = PIPE_B; 10634 break; 10635 case TRANS_DDI_EDP_INPUT_C_ONOFF: 10636 trans_pipe = PIPE_C; 10637 break; 10638 case TRANS_DDI_EDP_INPUT_D_ONOFF: 10639 trans_pipe = PIPE_D; 10640 break; 10641 } 10642 10643 if (trans_pipe == crtc->pipe) { 10644 pipe_config->cpu_transcoder = panel_transcoder; 10645 pipe_config->pch_pfit.force_thru = force_thru; 10646 } 10647 } 10648 10649 /* 10650 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 10651 */ 10652 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && 10653 enabled_panel_transcoders != BIT(TRANSCODER_EDP)); 10654 10655 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 10656 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10657 10658 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10659 if (!wf) 10660 return false; 10661 10662 wakerefs[power_domain] = wf; 10663 *power_domain_mask |= BIT_ULL(power_domain); 10664 10665 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10666 10667 return tmp & PIPECONF_ENABLE; 10668 } 10669 10670 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 10671 struct intel_crtc_state *pipe_config, 10672 u64 *power_domain_mask, 10673 intel_wakeref_t *wakerefs) 10674 { 10675 struct drm_device *dev = crtc->base.dev; 10676 struct drm_i915_private *dev_priv = to_i915(dev); 10677 enum intel_display_power_domain power_domain; 10678 enum transcoder cpu_transcoder; 10679 intel_wakeref_t wf; 10680 enum port port; 10681 u32 tmp; 10682 10683 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 10684 if (port == PORT_A) 10685 cpu_transcoder = TRANSCODER_DSI_A; 10686 else 10687 cpu_transcoder = TRANSCODER_DSI_C; 10688 10689 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10690 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10691 10692 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10693 if (!wf) 10694 continue; 10695 10696 wakerefs[power_domain] = wf; 10697 *power_domain_mask |= BIT_ULL(power_domain); 10698 10699 /* 10700 * The PLL needs to be enabled with a valid divider 10701 * configuration, otherwise accessing DSI registers will hang 10702 * the machine. See BSpec North Display Engine 10703 * registers/MIPI[BXT]. We can break out here early, since we 10704 * need the same DSI PLL to be enabled for both DSI ports. 
10705 */ 10706 if (!bxt_dsi_pll_is_enabled(dev_priv)) 10707 break; 10708 10709 /* XXX: this works for video mode only */ 10710 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port)); 10711 if (!(tmp & DPI_ENABLE)) 10712 continue; 10713 10714 tmp = I915_READ(MIPI_CTRL(port)); 10715 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 10716 continue; 10717 10718 pipe_config->cpu_transcoder = cpu_transcoder; 10719 break; 10720 } 10721 10722 return transcoder_is_dsi(pipe_config->cpu_transcoder); 10723 } 10724 10725 static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 10726 struct intel_crtc_state *pipe_config) 10727 { 10728 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10729 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 10730 struct intel_shared_dpll *pll; 10731 enum port port; 10732 u32 tmp; 10733 10734 if (transcoder_is_dsi(cpu_transcoder)) { 10735 port = (cpu_transcoder == TRANSCODER_DSI_A) ? 10736 PORT_A : PORT_B; 10737 } else { 10738 tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 10739 if (INTEL_GEN(dev_priv) >= 12) 10740 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 10741 else 10742 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 10743 } 10744 10745 if (INTEL_GEN(dev_priv) >= 11) 10746 icelake_get_ddi_pll(dev_priv, port, pipe_config); 10747 else if (IS_CANNONLAKE(dev_priv)) 10748 cannonlake_get_ddi_pll(dev_priv, port, pipe_config); 10749 else if (IS_GEN9_BC(dev_priv)) 10750 skylake_get_ddi_pll(dev_priv, port, pipe_config); 10751 else if (IS_GEN9_LP(dev_priv)) 10752 bxt_get_ddi_pll(dev_priv, port, pipe_config); 10753 else 10754 haswell_get_ddi_pll(dev_priv, port, pipe_config); 10755 10756 pll = pipe_config->shared_dpll; 10757 if (pll) { 10758 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, 10759 &pipe_config->dpll_hw_state)); 10760 } 10761 10762 /* 10763 * Haswell has only FDI/PCH transcoder A, which is connected to 10764 * DDI E. So just check whether this pipe is wired to DDI E and whether 10765 * the PCH transcoder is on.
10766 */ 10767 if (INTEL_GEN(dev_priv) < 9 && 10768 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 10769 pipe_config->has_pch_encoder = true; 10770 10771 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 10772 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10773 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10774 10775 ironlake_get_fdi_m_n_config(crtc, pipe_config); 10776 } 10777 } 10778 10779 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv, 10780 enum transcoder cpu_transcoder) 10781 { 10782 u32 trans_port_sync, master_select; 10783 10784 trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder)); 10785 10786 if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0) 10787 return INVALID_TRANSCODER; 10788 10789 master_select = trans_port_sync & 10790 PORT_SYNC_MODE_MASTER_SELECT_MASK; 10791 if (master_select == 0) 10792 return TRANSCODER_EDP; 10793 else 10794 return master_select - 1; 10795 } 10796 10797 static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) 10798 { 10799 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 10800 u32 transcoders; 10801 enum transcoder cpu_transcoder; 10802 10803 crtc_state->master_transcoder = transcoder_master_readout(dev_priv, 10804 crtc_state->cpu_transcoder); 10805 10806 transcoders = BIT(TRANSCODER_A) | 10807 BIT(TRANSCODER_B) | 10808 BIT(TRANSCODER_C) | 10809 BIT(TRANSCODER_D); 10810 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { 10811 enum intel_display_power_domain power_domain; 10812 intel_wakeref_t trans_wakeref; 10813 10814 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10815 trans_wakeref = intel_display_power_get_if_enabled(dev_priv, 10816 power_domain); 10817 10818 if (!trans_wakeref) 10819 continue; 10820 10821 if (transcoder_master_readout(dev_priv, cpu_transcoder) == 10822 crtc_state->cpu_transcoder) 10823 crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder); 10824 10825 intel_display_power_put(dev_priv, power_domain, trans_wakeref); 10826 } 10827 10828 WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER && 10829 crtc_state->sync_mode_slaves_mask); 10830 } 10831 10832 static bool haswell_get_pipe_config(struct intel_crtc *crtc, 10833 struct intel_crtc_state *pipe_config) 10834 { 10835 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10836 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf; 10837 enum intel_display_power_domain power_domain; 10838 u64 power_domain_mask; 10839 bool active; 10840 10841 pipe_config->master_transcoder = INVALID_TRANSCODER; 10842 10843 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10844 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10845 if (!wf) 10846 return false; 10847 10848 wakerefs[power_domain] = wf; 10849 power_domain_mask = BIT_ULL(power_domain); 10850 10851 pipe_config->shared_dpll = NULL; 10852 10853 active = hsw_get_transcoder_state(crtc, pipe_config, 10854 &power_domain_mask, wakerefs); 10855 10856 if (IS_GEN9_LP(dev_priv) && 10857 bxt_get_dsi_transcoder_state(crtc, pipe_config, 10858 &power_domain_mask, wakerefs)) { 10859 WARN_ON(active); 10860 active = true; 10861 } 10862 10863 if (!active) 10864 goto out; 10865 10866 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 10867 INTEL_GEN(dev_priv) >= 11) { 10868 haswell_get_ddi_port_state(crtc, pipe_config); 10869 intel_get_pipe_timings(crtc, pipe_config); 10870 } 10871 10872 intel_get_pipe_src_size(crtc, pipe_config); 10873 10874 if (IS_HASWELL(dev_priv)) { 10875 u32 tmp = 
I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10876 10877 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) 10878 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 10879 else 10880 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 10881 } else { 10882 pipe_config->output_format = 10883 bdw_get_pipemisc_output_format(crtc); 10884 10885 /* 10886 * Currently there is no interface defined to 10887 * check user preference between RGB/YCBCR444 10888 * or YCBCR420. So the only possible case for 10889 * YCBCR444 usage is driving YCBCR420 output 10890 * with LSPCON, when the pipe is configured for 10891 * YCBCR444 output and LSPCON takes care of 10892 * downsampling it. 10893 */ 10894 pipe_config->lspcon_downsampling = 10895 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444; 10896 } 10897 10898 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)); 10899 10900 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); 10901 10902 if (INTEL_GEN(dev_priv) >= 9) { 10903 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe)); 10904 10905 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) 10906 pipe_config->gamma_enable = true; 10907 10908 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) 10909 pipe_config->csc_enable = true; 10910 } else { 10911 i9xx_get_pipe_color_config(pipe_config); 10912 } 10913 10914 intel_color_get_config(pipe_config); 10915 10916 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 10917 WARN_ON(power_domain_mask & BIT_ULL(power_domain)); 10918 10919 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10920 if (wf) { 10921 wakerefs[power_domain] = wf; 10922 power_domain_mask |= BIT_ULL(power_domain); 10923 10924 if (INTEL_GEN(dev_priv) >= 9) 10925 skylake_get_pfit_config(crtc, pipe_config); 10926 else 10927 ironlake_get_pfit_config(crtc, pipe_config); 10928 } 10929 10930 if (hsw_crtc_supports_ips(crtc)) { 10931 if (IS_HASWELL(dev_priv)) 10932 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE; 10933 else { 10934 /* 10935 * We cannot read out the IPS state on Broadwell; set it 10936 * to true so we can bring it to a defined state on the 10937 * first commit.
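 *
 * (Broadwell toggles IPS through the pcode mailbox rather than the
 * IPS_CTL MMIO bit used on Haswell, so there is nothing we could
 * read back here.)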
10938 */ 10939 pipe_config->ips_enabled = true; 10940 } 10941 } 10942 10943 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 10944 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 10945 pipe_config->pixel_multiplier = 10946 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 10947 } else { 10948 pipe_config->pixel_multiplier = 1; 10949 } 10950 10951 if (INTEL_GEN(dev_priv) >= 11 && 10952 !transcoder_is_dsi(pipe_config->cpu_transcoder)) 10953 icelake_get_trans_port_sync_config(pipe_config); 10954 10955 out: 10956 for_each_power_domain(power_domain, power_domain_mask) 10957 intel_display_power_put(dev_priv, 10958 power_domain, wakerefs[power_domain]); 10959 10960 return active; 10961 } 10962 10963 static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 10964 { 10965 struct drm_i915_private *dev_priv = 10966 to_i915(plane_state->uapi.plane->dev); 10967 const struct drm_framebuffer *fb = plane_state->hw.fb; 10968 const struct drm_i915_gem_object *obj = intel_fb_obj(fb); 10969 u32 base; 10970 10971 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) 10972 base = obj->phys_handle->busaddr; 10973 else 10974 base = intel_plane_ggtt_offset(plane_state); 10975 10976 return base + plane_state->color_plane[0].offset; 10977 } 10978 10979 static u32 intel_cursor_position(const struct intel_plane_state *plane_state) 10980 { 10981 int x = plane_state->uapi.dst.x1; 10982 int y = plane_state->uapi.dst.y1; 10983 u32 pos = 0; 10984 10985 if (x < 0) { 10986 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 10987 x = -x; 10988 } 10989 pos |= x << CURSOR_X_SHIFT; 10990 10991 if (y < 0) { 10992 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 10993 y = -y; 10994 } 10995 pos |= y << CURSOR_Y_SHIFT; 10996 10997 return pos; 10998 } 10999 11000 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) 11001 { 11002 const struct drm_mode_config *config = 11003 &plane_state->uapi.plane->dev->mode_config; 11004 int width = drm_rect_width(&plane_state->uapi.dst); 11005 int height = drm_rect_height(&plane_state->uapi.dst); 11006 11007 return width > 0 && width <= config->cursor_width && 11008 height > 0 && height <= config->cursor_height; 11009 } 11010 11011 static int intel_cursor_check_surface(struct intel_plane_state *plane_state) 11012 { 11013 struct drm_i915_private *dev_priv = 11014 to_i915(plane_state->uapi.plane->dev); 11015 unsigned int rotation = plane_state->hw.rotation; 11016 int src_x, src_y; 11017 u32 offset; 11018 int ret; 11019 11020 ret = intel_plane_compute_gtt(plane_state); 11021 if (ret) 11022 return ret; 11023 11024 if (!plane_state->uapi.visible) 11025 return 0; 11026 11027 src_x = plane_state->uapi.src.x1 >> 16; 11028 src_y = plane_state->uapi.src.y1 >> 16; 11029 11030 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 11031 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 11032 plane_state, 0); 11033 11034 if (src_x != 0 || src_y != 0) { 11035 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n"); 11036 return -EINVAL; 11037 } 11038 11039 /* 11040 * Put the final coordinates back so that the src 11041 * coordinate checks will see the right values. 
11042 */ 11043 drm_rect_translate_to(&plane_state->uapi.src, 11044 src_x << 16, src_y << 16); 11045 11046 /* ILK+ do this automagically in hardware */ 11047 if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { 11048 const struct drm_framebuffer *fb = plane_state->hw.fb; 11049 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 11050 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 11051 11052 offset += (src_h * src_w - 1) * fb->format->cpp[0]; 11053 } 11054 11055 plane_state->color_plane[0].offset = offset; 11056 plane_state->color_plane[0].x = src_x; 11057 plane_state->color_plane[0].y = src_y; 11058 11059 return 0; 11060 } 11061 11062 static int intel_check_cursor(struct intel_crtc_state *crtc_state, 11063 struct intel_plane_state *plane_state) 11064 { 11065 const struct drm_framebuffer *fb = plane_state->hw.fb; 11066 int ret; 11067 11068 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { 11069 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 11070 return -EINVAL; 11071 } 11072 11073 ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, 11074 &crtc_state->uapi, 11075 DRM_PLANE_HELPER_NO_SCALING, 11076 DRM_PLANE_HELPER_NO_SCALING, 11077 true, true); 11078 if (ret) 11079 return ret; 11080 11081 /* Use the unclipped src/dst rectangles, which we program to hw */ 11082 plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi); 11083 plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi); 11084 11085 ret = intel_cursor_check_surface(plane_state); 11086 if (ret) 11087 return ret; 11088 11089 if (!plane_state->uapi.visible) 11090 return 0; 11091 11092 ret = intel_plane_check_src_coordinates(plane_state); 11093 if (ret) 11094 return ret; 11095 11096 return 0; 11097 } 11098 11099 static unsigned int 11100 i845_cursor_max_stride(struct intel_plane *plane, 11101 u32 pixel_format, u64 modifier, 11102 unsigned int rotation) 11103 { 11104 return 2048; 11105 } 11106 11107 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11108 { 11109 u32 cntl = 0; 11110 11111 if (crtc_state->gamma_enable) 11112 cntl |= CURSOR_GAMMA_ENABLE; 11113 11114 return cntl; 11115 } 11116 11117 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 11118 const struct intel_plane_state *plane_state) 11119 { 11120 return CURSOR_ENABLE | 11121 CURSOR_FORMAT_ARGB | 11122 CURSOR_STRIDE(plane_state->color_plane[0].stride); 11123 } 11124 11125 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 11126 { 11127 int width = drm_rect_width(&plane_state->uapi.dst); 11128 11129 /* 11130 * 845g/865g are only limited by the width of their cursors, 11131 * the height is arbitrary up to the precision of the register. 
11132 */ 11133 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 11134 } 11135 11136 static int i845_check_cursor(struct intel_crtc_state *crtc_state, 11137 struct intel_plane_state *plane_state) 11138 { 11139 const struct drm_framebuffer *fb = plane_state->hw.fb; 11140 int ret; 11141 11142 ret = intel_check_cursor(crtc_state, plane_state); 11143 if (ret) 11144 return ret; 11145 11146 /* if we want to turn off the cursor ignore width and height */ 11147 if (!fb) 11148 return 0; 11149 11150 /* Check for which cursor types we support */ 11151 if (!i845_cursor_size_ok(plane_state)) { 11152 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 11153 drm_rect_width(&plane_state->uapi.dst), 11154 drm_rect_height(&plane_state->uapi.dst)); 11155 return -EINVAL; 11156 } 11157 11158 WARN_ON(plane_state->uapi.visible && 11159 plane_state->color_plane[0].stride != fb->pitches[0]); 11160 11161 switch (fb->pitches[0]) { 11162 case 256: 11163 case 512: 11164 case 1024: 11165 case 2048: 11166 break; 11167 default: 11168 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n", 11169 fb->pitches[0]); 11170 return -EINVAL; 11171 } 11172 11173 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 11174 11175 return 0; 11176 } 11177 11178 static void i845_update_cursor(struct intel_plane *plane, 11179 const struct intel_crtc_state *crtc_state, 11180 const struct intel_plane_state *plane_state) 11181 { 11182 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11183 u32 cntl = 0, base = 0, pos = 0, size = 0; 11184 unsigned long irqflags; 11185 11186 if (plane_state && plane_state->uapi.visible) { 11187 unsigned int width = drm_rect_width(&plane_state->uapi.dst); 11188 unsigned int height = drm_rect_height(&plane_state->uapi.dst); 11189 11190 cntl = plane_state->ctl | 11191 i845_cursor_ctl_crtc(crtc_state); 11192 11193 size = (height << 12) | width; 11194 11195 base = intel_cursor_base(plane_state); 11196 pos = intel_cursor_position(plane_state); 11197 } 11198 11199 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11200 11201 /* On these chipsets we can only modify the base/size/stride 11202 * whilst the cursor is disabled. 
11203 */ 11204 if (plane->cursor.base != base || 11205 plane->cursor.size != size || 11206 plane->cursor.cntl != cntl) { 11207 I915_WRITE_FW(CURCNTR(PIPE_A), 0); 11208 I915_WRITE_FW(CURBASE(PIPE_A), base); 11209 I915_WRITE_FW(CURSIZE, size); 11210 I915_WRITE_FW(CURPOS(PIPE_A), pos); 11211 I915_WRITE_FW(CURCNTR(PIPE_A), cntl); 11212 11213 plane->cursor.base = base; 11214 plane->cursor.size = size; 11215 plane->cursor.cntl = cntl; 11216 } else { 11217 I915_WRITE_FW(CURPOS(PIPE_A), pos); 11218 } 11219 11220 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11221 } 11222 11223 static void i845_disable_cursor(struct intel_plane *plane, 11224 const struct intel_crtc_state *crtc_state) 11225 { 11226 i845_update_cursor(plane, crtc_state, NULL); 11227 } 11228 11229 static bool i845_cursor_get_hw_state(struct intel_plane *plane, 11230 enum pipe *pipe) 11231 { 11232 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11233 enum intel_display_power_domain power_domain; 11234 intel_wakeref_t wakeref; 11235 bool ret; 11236 11237 power_domain = POWER_DOMAIN_PIPE(PIPE_A); 11238 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11239 if (!wakeref) 11240 return false; 11241 11242 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 11243 11244 *pipe = PIPE_A; 11245 11246 intel_display_power_put(dev_priv, power_domain, wakeref); 11247 11248 return ret; 11249 } 11250 11251 static unsigned int 11252 i9xx_cursor_max_stride(struct intel_plane *plane, 11253 u32 pixel_format, u64 modifier, 11254 unsigned int rotation) 11255 { 11256 return plane->base.dev->mode_config.cursor_width * 4; 11257 } 11258 11259 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11260 { 11261 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 11262 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11263 u32 cntl = 0; 11264 11265 if (INTEL_GEN(dev_priv) >= 11) 11266 return cntl; 11267 11268 if (crtc_state->gamma_enable) 11269 cntl = MCURSOR_GAMMA_ENABLE; 11270 11271 if (crtc_state->csc_enable) 11272 cntl |= MCURSOR_PIPE_CSC_ENABLE; 11273 11274 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 11275 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 11276 11277 return cntl; 11278 } 11279 11280 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 11281 const struct intel_plane_state *plane_state) 11282 { 11283 struct drm_i915_private *dev_priv = 11284 to_i915(plane_state->uapi.plane->dev); 11285 u32 cntl = 0; 11286 11287 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 11288 cntl |= MCURSOR_TRICKLE_FEED_DISABLE; 11289 11290 switch (drm_rect_width(&plane_state->uapi.dst)) { 11291 case 64: 11292 cntl |= MCURSOR_MODE_64_ARGB_AX; 11293 break; 11294 case 128: 11295 cntl |= MCURSOR_MODE_128_ARGB_AX; 11296 break; 11297 case 256: 11298 cntl |= MCURSOR_MODE_256_ARGB_AX; 11299 break; 11300 default: 11301 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst)); 11302 return 0; 11303 } 11304 11305 if (plane_state->hw.rotation & DRM_MODE_ROTATE_180) 11306 cntl |= MCURSOR_ROTATE_180; 11307 11308 return cntl; 11309 } 11310 11311 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) 11312 { 11313 struct drm_i915_private *dev_priv = 11314 to_i915(plane_state->uapi.plane->dev); 11315 int width = drm_rect_width(&plane_state->uapi.dst); 11316 int height = drm_rect_height(&plane_state->uapi.dst); 11317 11318 if (!intel_cursor_size_ok(plane_state)) 11319 return false; 11320 11321 /* Cursor width is limited to a few power-of-two sizes */ 11322 switch 
(width) { 11323 case 256: 11324 case 128: 11325 case 64: 11326 break; 11327 default: 11328 return false; 11329 } 11330 11331 /* 11332 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor 11333 * height from 8 lines up to the cursor width, when the 11334 * cursor is not rotated. Everything else requires square 11335 * cursors. 11336 */ 11337 if (HAS_CUR_FBC(dev_priv) && 11338 plane_state->hw.rotation & DRM_MODE_ROTATE_0) { 11339 if (height < 8 || height > width) 11340 return false; 11341 } else { 11342 if (height != width) 11343 return false; 11344 } 11345 11346 return true; 11347 } 11348 11349 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, 11350 struct intel_plane_state *plane_state) 11351 { 11352 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 11353 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11354 const struct drm_framebuffer *fb = plane_state->hw.fb; 11355 enum pipe pipe = plane->pipe; 11356 int ret; 11357 11358 ret = intel_check_cursor(crtc_state, plane_state); 11359 if (ret) 11360 return ret; 11361 11362 /* if we want to turn off the cursor ignore width and height */ 11363 if (!fb) 11364 return 0; 11365 11366 /* Check for which cursor types we support */ 11367 if (!i9xx_cursor_size_ok(plane_state)) { 11368 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 11369 drm_rect_width(&plane_state->uapi.dst), 11370 drm_rect_height(&plane_state->uapi.dst)); 11371 return -EINVAL; 11372 } 11373 11374 WARN_ON(plane_state->uapi.visible && 11375 plane_state->color_plane[0].stride != fb->pitches[0]); 11376 11377 if (fb->pitches[0] != 11378 drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { 11379 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n", 11380 fb->pitches[0], 11381 drm_rect_width(&plane_state->uapi.dst)); 11382 return -EINVAL; 11383 } 11384 11385 /* 11386 * There's something wrong with the cursor on CHV pipe C. 11387 * If it straddles the left edge of the screen then 11388 * moving it away from the edge or disabling it often 11389 * results in a pipe underrun, and often that can lead to 11390 * a dead pipe (constant underrun reported, and it scans 11391 * out just a solid color). To recover from that, the 11392 * display power well must be turned off and on again. 11393 * Refuse to put the cursor into that compromised position.
11394 */ 11395 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && 11396 plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) { 11397 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); 11398 return -EINVAL; 11399 } 11400 11401 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); 11402 11403 return 0; 11404 } 11405 11406 static void i9xx_update_cursor(struct intel_plane *plane, 11407 const struct intel_crtc_state *crtc_state, 11408 const struct intel_plane_state *plane_state) 11409 { 11410 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11411 enum pipe pipe = plane->pipe; 11412 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; 11413 unsigned long irqflags; 11414 11415 if (plane_state && plane_state->uapi.visible) { 11416 unsigned width = drm_rect_width(&plane_state->uapi.dst); 11417 unsigned height = drm_rect_height(&plane_state->uapi.dst); 11418 11419 cntl = plane_state->ctl | 11420 i9xx_cursor_ctl_crtc(crtc_state); 11421 11422 if (width != height) 11423 fbc_ctl = CUR_FBC_CTL_EN | (height - 1); 11424 11425 base = intel_cursor_base(plane_state); 11426 pos = intel_cursor_position(plane_state); 11427 } 11428 11429 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11430 11431 /* 11432 * On some platforms writing CURCNTR first will also 11433 * cause CURPOS to be armed by the CURBASE write. 11434 * Without the CURCNTR write the CURPOS write would 11435 * arm itself. Thus we always update CURCNTR before 11436 * CURPOS. 11437 * 11438 * On other platforms CURPOS always requires the 11439 * CURBASE write to arm the update. Additonally 11440 * a write to any of the cursor register will cancel 11441 * an already armed cursor update. Thus leaving out 11442 * the CURBASE write after CURPOS could lead to a 11443 * cursor that doesn't appear to move, or even change 11444 * shape. Thus we always write CURBASE. 11445 * 11446 * The other registers are armed by by the CURBASE write 11447 * except when the plane is getting enabled at which time 11448 * the CURCNTR write arms the update. 11449 */ 11450 11451 if (INTEL_GEN(dev_priv) >= 9) 11452 skl_write_cursor_wm(plane, crtc_state); 11453 11454 if (plane->cursor.base != base || 11455 plane->cursor.size != fbc_ctl || 11456 plane->cursor.cntl != cntl) { 11457 if (HAS_CUR_FBC(dev_priv)) 11458 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl); 11459 I915_WRITE_FW(CURCNTR(pipe), cntl); 11460 I915_WRITE_FW(CURPOS(pipe), pos); 11461 I915_WRITE_FW(CURBASE(pipe), base); 11462 11463 plane->cursor.base = base; 11464 plane->cursor.size = fbc_ctl; 11465 plane->cursor.cntl = cntl; 11466 } else { 11467 I915_WRITE_FW(CURPOS(pipe), pos); 11468 I915_WRITE_FW(CURBASE(pipe), base); 11469 } 11470 11471 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11472 } 11473 11474 static void i9xx_disable_cursor(struct intel_plane *plane, 11475 const struct intel_crtc_state *crtc_state) 11476 { 11477 i9xx_update_cursor(plane, crtc_state, NULL); 11478 } 11479 11480 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, 11481 enum pipe *pipe) 11482 { 11483 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11484 enum intel_display_power_domain power_domain; 11485 intel_wakeref_t wakeref; 11486 bool ret; 11487 u32 val; 11488 11489 /* 11490 * Not 100% correct for planes that can move between pipes, 11491 * but that's only the case for gen2-3 which don't have any 11492 * display power wells. 
11493 */ 11494 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 11495 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11496 if (!wakeref) 11497 return false; 11498 11499 val = I915_READ(CURCNTR(plane->pipe)); 11500 11501 ret = val & MCURSOR_MODE; 11502 11503 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 11504 *pipe = plane->pipe; 11505 else 11506 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> 11507 MCURSOR_PIPE_SELECT_SHIFT; 11508 11509 intel_display_power_put(dev_priv, power_domain, wakeref); 11510 11511 return ret; 11512 } 11513 11514 /* VESA 640x480x72Hz mode to set on the pipe */ 11515 static const struct drm_display_mode load_detect_mode = { 11516 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 11517 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 11518 }; 11519 11520 struct drm_framebuffer * 11521 intel_framebuffer_create(struct drm_i915_gem_object *obj, 11522 struct drm_mode_fb_cmd2 *mode_cmd) 11523 { 11524 struct intel_framebuffer *intel_fb; 11525 int ret; 11526 11527 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 11528 if (!intel_fb) 11529 return ERR_PTR(-ENOMEM); 11530 11531 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); 11532 if (ret) 11533 goto err; 11534 11535 return &intel_fb->base; 11536 11537 err: 11538 kfree(intel_fb); 11539 return ERR_PTR(ret); 11540 } 11541 11542 static int intel_modeset_disable_planes(struct drm_atomic_state *state, 11543 struct drm_crtc *crtc) 11544 { 11545 struct drm_plane *plane; 11546 struct drm_plane_state *plane_state; 11547 int ret, i; 11548 11549 ret = drm_atomic_add_affected_planes(state, crtc); 11550 if (ret) 11551 return ret; 11552 11553 for_each_new_plane_in_state(state, plane, plane_state, i) { 11554 if (plane_state->crtc != crtc) 11555 continue; 11556 11557 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 11558 if (ret) 11559 return ret; 11560 11561 drm_atomic_set_fb_for_plane(plane_state, NULL); 11562 } 11563 11564 return 0; 11565 } 11566 11567 int intel_get_load_detect_pipe(struct drm_connector *connector, 11568 struct intel_load_detect_pipe *old, 11569 struct drm_modeset_acquire_ctx *ctx) 11570 { 11571 struct intel_crtc *intel_crtc; 11572 struct intel_encoder *intel_encoder = 11573 intel_attached_encoder(connector); 11574 struct drm_crtc *possible_crtc; 11575 struct drm_encoder *encoder = &intel_encoder->base; 11576 struct drm_crtc *crtc = NULL; 11577 struct drm_device *dev = encoder->dev; 11578 struct drm_i915_private *dev_priv = to_i915(dev); 11579 struct drm_mode_config *config = &dev->mode_config; 11580 struct drm_atomic_state *state = NULL, *restore_state = NULL; 11581 struct drm_connector_state *connector_state; 11582 struct intel_crtc_state *crtc_state; 11583 int ret, i = -1; 11584 11585 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11586 connector->base.id, connector->name, 11587 encoder->base.id, encoder->name); 11588 11589 old->restore_state = NULL; 11590 11591 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 11592 11593 /* 11594 * Algorithm gets a little messy: 11595 * 11596 * - if the connector already has an assigned crtc, use it (but make 11597 * sure it's on first) 11598 * 11599 * - try to find the first unused crtc that can drive this connector, 11600 * and use that if we find one 11601 */ 11602 11603 /* See if we already have a CRTC for this connector */ 11604 if (connector->state->crtc) { 11605 crtc = connector->state->crtc; 11606 11607 ret = drm_modeset_lock(&crtc->mutex, ctx); 11608 if (ret) 11609 goto fail; 11610 
11611 /* Make sure the crtc and connector are running */ 11612 goto found; 11613 } 11614 11615 /* Find an unused one (if possible) */ 11616 for_each_crtc(dev, possible_crtc) { 11617 i++; 11618 if (!(encoder->possible_crtcs & (1 << i))) 11619 continue; 11620 11621 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 11622 if (ret) 11623 goto fail; 11624 11625 if (possible_crtc->state->enable) { 11626 drm_modeset_unlock(&possible_crtc->mutex); 11627 continue; 11628 } 11629 11630 crtc = possible_crtc; 11631 break; 11632 } 11633 11634 /* 11635 * If we didn't find an unused CRTC, don't use any. 11636 */ 11637 if (!crtc) { 11638 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 11639 ret = -ENODEV; 11640 goto fail; 11641 } 11642 11643 found: 11644 intel_crtc = to_intel_crtc(crtc); 11645 11646 state = drm_atomic_state_alloc(dev); 11647 restore_state = drm_atomic_state_alloc(dev); 11648 if (!state || !restore_state) { 11649 ret = -ENOMEM; 11650 goto fail; 11651 } 11652 11653 state->acquire_ctx = ctx; 11654 restore_state->acquire_ctx = ctx; 11655 11656 connector_state = drm_atomic_get_connector_state(state, connector); 11657 if (IS_ERR(connector_state)) { 11658 ret = PTR_ERR(connector_state); 11659 goto fail; 11660 } 11661 11662 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 11663 if (ret) 11664 goto fail; 11665 11666 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 11667 if (IS_ERR(crtc_state)) { 11668 ret = PTR_ERR(crtc_state); 11669 goto fail; 11670 } 11671 11672 crtc_state->uapi.active = true; 11673 11674 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi, 11675 &load_detect_mode); 11676 if (ret) 11677 goto fail; 11678 11679 ret = intel_modeset_disable_planes(state, crtc); 11680 if (ret) 11681 goto fail; 11682 11683 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 11684 if (!ret) 11685 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 11686 if (!ret) 11687 ret = drm_atomic_add_affected_planes(restore_state, crtc); 11688 if (ret) { 11689 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); 11690 goto fail; 11691 } 11692 11693 ret = drm_atomic_commit(state); 11694 if (ret) { 11695 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 11696 goto fail; 11697 } 11698 11699 old->restore_state = restore_state; 11700 drm_atomic_state_put(state); 11701 11702 /* let the connector get through one full cycle before testing */ 11703 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 11704 return true; 11705 11706 fail: 11707 if (state) { 11708 drm_atomic_state_put(state); 11709 state = NULL; 11710 } 11711 if (restore_state) { 11712 drm_atomic_state_put(restore_state); 11713 restore_state = NULL; 11714 } 11715 11716 if (ret == -EDEADLK) 11717 return ret; 11718 11719 return false; 11720 } 11721 11722 void intel_release_load_detect_pipe(struct drm_connector *connector, 11723 struct intel_load_detect_pipe *old, 11724 struct drm_modeset_acquire_ctx *ctx) 11725 { 11726 struct intel_encoder *intel_encoder = 11727 intel_attached_encoder(connector); 11728 struct drm_encoder *encoder = &intel_encoder->base; 11729 struct drm_atomic_state *state = old->restore_state; 11730 int ret; 11731 11732 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11733 connector->base.id, connector->name, 11734 encoder->base.id, encoder->name); 11735 11736 if (!state) 11737 return; 11738 11739 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 11740 if (ret) 11741 DRM_DEBUG_KMS("Couldn't release load detect 
pipe: %i\n", ret); 11742 drm_atomic_state_put(state); 11743 } 11744 11745 static int i9xx_pll_refclk(struct drm_device *dev, 11746 const struct intel_crtc_state *pipe_config) 11747 { 11748 struct drm_i915_private *dev_priv = to_i915(dev); 11749 u32 dpll = pipe_config->dpll_hw_state.dpll; 11750 11751 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 11752 return dev_priv->vbt.lvds_ssc_freq; 11753 else if (HAS_PCH_SPLIT(dev_priv)) 11754 return 120000; 11755 else if (!IS_GEN(dev_priv, 2)) 11756 return 96000; 11757 else 11758 return 48000; 11759 } 11760 11761 /* Returns the clock of the currently programmed mode of the given pipe. */ 11762 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 11763 struct intel_crtc_state *pipe_config) 11764 { 11765 struct drm_device *dev = crtc->base.dev; 11766 struct drm_i915_private *dev_priv = to_i915(dev); 11767 enum pipe pipe = crtc->pipe; 11768 u32 dpll = pipe_config->dpll_hw_state.dpll; 11769 u32 fp; 11770 struct dpll clock; 11771 int port_clock; 11772 int refclk = i9xx_pll_refclk(dev, pipe_config); 11773 11774 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 11775 fp = pipe_config->dpll_hw_state.fp0; 11776 else 11777 fp = pipe_config->dpll_hw_state.fp1; 11778 11779 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 11780 if (IS_PINEVIEW(dev_priv)) { 11781 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 11782 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 11783 } else { 11784 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 11785 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 11786 } 11787 11788 if (!IS_GEN(dev_priv, 2)) { 11789 if (IS_PINEVIEW(dev_priv)) 11790 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 11791 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 11792 else 11793 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 11794 DPLL_FPA01_P1_POST_DIV_SHIFT); 11795 11796 switch (dpll & DPLL_MODE_MASK) { 11797 case DPLLB_MODE_DAC_SERIAL: 11798 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 11799 5 : 10; 11800 break; 11801 case DPLLB_MODE_LVDS: 11802 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 11803 7 : 14; 11804 break; 11805 default: 11806 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 11807 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 11808 return; 11809 } 11810 11811 if (IS_PINEVIEW(dev_priv)) 11812 port_clock = pnv_calc_dpll_params(refclk, &clock); 11813 else 11814 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11815 } else { 11816 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS); 11817 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 11818 11819 if (is_lvds) { 11820 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 11821 DPLL_FPA01_P1_POST_DIV_SHIFT); 11822 11823 if (lvds & LVDS_CLKB_POWER_UP) 11824 clock.p2 = 7; 11825 else 11826 clock.p2 = 14; 11827 } else { 11828 if (dpll & PLL_P1_DIVIDE_BY_TWO) 11829 clock.p1 = 2; 11830 else { 11831 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 11832 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 11833 } 11834 if (dpll & PLL_P2_DIVIDE_BY_4) 11835 clock.p2 = 4; 11836 else 11837 clock.p2 = 2; 11838 } 11839 11840 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11841 } 11842 11843 /* 11844 * This value includes pixel_multiplier. We will use 11845 * port_clock to compute adjusted_mode.crtc_clock in the 11846 * encoder's get_config() function. 
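*
* Illustration with made-up numbers (not taken from any real SKU):
* refclk = 96000 kHz, m1 = 12, m2 = 9, n = 3, p1 = 2, p2 = 10 gives
* m = 5*(12+2) + (9+2) = 81, vco = 96000*81/(3+2) = 1555200 kHz and
* port_clock = 1555200/(2*10) = 77760 kHz via i9xx_calc_dpll_params().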
11847 */ 11848 pipe_config->port_clock = port_clock; 11849 } 11850 11851 int intel_dotclock_calculate(int link_freq, 11852 const struct intel_link_m_n *m_n) 11853 { 11854 /* 11855 * The calculation for the data clock is: 11856 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 11857 * But we want to avoid losing precison if possible, so: 11858 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 11859 * 11860 * and the link clock is simpler: 11861 * link_clock = (m * link_clock) / n 11862 */ 11863 11864 if (!m_n->link_n) 11865 return 0; 11866 11867 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); 11868 } 11869 11870 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 11871 struct intel_crtc_state *pipe_config) 11872 { 11873 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11874 11875 /* read out port_clock from the DPLL */ 11876 i9xx_crtc_clock_get(crtc, pipe_config); 11877 11878 /* 11879 * In case there is an active pipe without active ports, 11880 * we may need some idea for the dotclock anyway. 11881 * Calculate one based on the FDI configuration. 11882 */ 11883 pipe_config->hw.adjusted_mode.crtc_clock = 11884 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 11885 &pipe_config->fdi_m_n); 11886 } 11887 11888 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, 11889 struct intel_crtc *crtc) 11890 { 11891 memset(crtc_state, 0, sizeof(*crtc_state)); 11892 11893 __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base); 11894 11895 crtc_state->cpu_transcoder = INVALID_TRANSCODER; 11896 crtc_state->master_transcoder = INVALID_TRANSCODER; 11897 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 11898 crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID; 11899 crtc_state->scaler_state.scaler_id = -1; 11900 } 11901 11902 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc) 11903 { 11904 struct intel_crtc_state *crtc_state; 11905 11906 crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL); 11907 11908 if (crtc_state) 11909 intel_crtc_state_reset(crtc_state, crtc); 11910 11911 return crtc_state; 11912 } 11913 11914 /* Returns the currently programmed mode of the given encoder. 
*/ 11915 struct drm_display_mode * 11916 intel_encoder_current_mode(struct intel_encoder *encoder) 11917 { 11918 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 11919 struct intel_crtc_state *crtc_state; 11920 struct drm_display_mode *mode; 11921 struct intel_crtc *crtc; 11922 enum pipe pipe; 11923 11924 if (!encoder->get_hw_state(encoder, &pipe)) 11925 return NULL; 11926 11927 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 11928 11929 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 11930 if (!mode) 11931 return NULL; 11932 11933 crtc_state = intel_crtc_state_alloc(crtc); 11934 if (!crtc_state) { 11935 kfree(mode); 11936 return NULL; 11937 } 11938 11939 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) { 11940 kfree(crtc_state); 11941 kfree(mode); 11942 return NULL; 11943 } 11944 11945 encoder->get_config(encoder, crtc_state); 11946 11947 intel_mode_from_pipe_config(mode, crtc_state); 11948 11949 kfree(crtc_state); 11950 11951 return mode; 11952 } 11953 11954 static void intel_crtc_destroy(struct drm_crtc *crtc) 11955 { 11956 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11957 11958 drm_crtc_cleanup(crtc); 11959 kfree(intel_crtc); 11960 } 11961 11962 /** 11963 * intel_wm_need_update - Check whether watermarks need updating 11964 * @cur: current plane state 11965 * @new: new plane state 11966 * 11967 * Check current plane state versus the new one to determine whether 11968 * watermarks need to be recalculated. 11969 * 11970 * Returns true or false. 11971 */ 11972 static bool intel_wm_need_update(const struct intel_plane_state *cur, 11973 struct intel_plane_state *new) 11974 { 11975 /* Update watermarks on tiling or size changes. */ 11976 if (new->uapi.visible != cur->uapi.visible) 11977 return true; 11978 11979 if (!cur->hw.fb || !new->hw.fb) 11980 return false; 11981 11982 if (cur->hw.fb->modifier != new->hw.fb->modifier || 11983 cur->hw.rotation != new->hw.rotation || 11984 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) || 11985 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) || 11986 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) || 11987 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst)) 11988 return true; 11989 11990 return false; 11991 } 11992 11993 static bool needs_scaling(const struct intel_plane_state *state) 11994 { 11995 int src_w = drm_rect_width(&state->uapi.src) >> 16; 11996 int src_h = drm_rect_height(&state->uapi.src) >> 16; 11997 int dst_w = drm_rect_width(&state->uapi.dst); 11998 int dst_h = drm_rect_height(&state->uapi.dst); 11999 12000 return (src_w != dst_w || src_h != dst_h); 12001 } 12002 12003 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, 12004 struct intel_crtc_state *crtc_state, 12005 const struct intel_plane_state *old_plane_state, 12006 struct intel_plane_state *plane_state) 12007 { 12008 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 12009 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 12010 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12011 bool mode_changed = needs_modeset(crtc_state); 12012 bool was_crtc_enabled = old_crtc_state->hw.active; 12013 bool is_crtc_enabled = crtc_state->hw.active; 12014 bool turn_off, turn_on, visible, was_visible; 12015 int ret; 12016 12017 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { 12018 ret = skl_update_scaler_plane(crtc_state, plane_state); 12019 if (ret) 12020 return ret; 12021 } 12022 12023 was_visible = 
old_plane_state->uapi.visible; 12024 visible = plane_state->uapi.visible; 12025 12026 if (!was_crtc_enabled && WARN_ON(was_visible)) 12027 was_visible = false; 12028 12029 /* 12030 * Visibility is calculated as if the crtc was on, but 12031 * after scaler setup everything depends on it being off 12032 * when the crtc isn't active. 12033 * 12034 * FIXME this is wrong for watermarks. Watermarks should also 12035 * be computed as if the pipe would be active. Perhaps move 12036 * per-plane wm computation to the .check_plane() hook, and 12037 * only combine the results from all planes in the current place? 12038 */ 12039 if (!is_crtc_enabled) { 12040 plane_state->uapi.visible = visible = false; 12041 crtc_state->active_planes &= ~BIT(plane->id); 12042 crtc_state->data_rate[plane->id] = 0; 12043 crtc_state->min_cdclk[plane->id] = 0; 12044 } 12045 12046 if (!was_visible && !visible) 12047 return 0; 12048 12049 turn_off = was_visible && (!visible || mode_changed); 12050 turn_on = visible && (!was_visible || mode_changed); 12051 12052 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", 12053 crtc->base.base.id, crtc->base.name, 12054 plane->base.base.id, plane->base.name, 12055 was_visible, visible, 12056 turn_off, turn_on, mode_changed); 12057 12058 if (turn_on) { 12059 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 12060 crtc_state->update_wm_pre = true; 12061 12062 /* must disable cxsr around plane enable/disable */ 12063 if (plane->id != PLANE_CURSOR) 12064 crtc_state->disable_cxsr = true; 12065 } else if (turn_off) { 12066 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 12067 crtc_state->update_wm_post = true; 12068 12069 /* must disable cxsr around plane enable/disable */ 12070 if (plane->id != PLANE_CURSOR) 12071 crtc_state->disable_cxsr = true; 12072 } else if (intel_wm_need_update(old_plane_state, plane_state)) { 12073 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { 12074 /* FIXME bollocks */ 12075 crtc_state->update_wm_pre = true; 12076 crtc_state->update_wm_post = true; 12077 } 12078 } 12079 12080 if (visible || was_visible) 12081 crtc_state->fb_bits |= plane->frontbuffer_bit; 12082 12083 /* 12084 * ILK/SNB DVSACNTR/Sprite Enable 12085 * IVB SPR_CTL/Sprite Enable 12086 * "When in Self Refresh Big FIFO mode, a write to enable the 12087 * plane will be internally buffered and delayed while Big FIFO 12088 * mode is exiting." 12089 * 12090 * Which means that enabling the sprite can take an extra frame 12091 * when we start in big FIFO mode (LP1+). Thus we need to drop 12092 * down to LP0 and wait for vblank in order to make sure the 12093 * sprite gets enabled on the next vblank after the register write. 12094 * Doing otherwise would risk enabling the sprite one frame after 12095 * we've already signalled flip completion. We can resume LP1+ 12096 * once the sprite has been enabled. 12097 * 12098 * 12099 * WaCxSRDisabledForSpriteScaling:ivb 12100 * IVB SPR_SCALE/Scaling Enable 12101 * "Low Power watermarks must be disabled for at least one 12102 * frame before enabling sprite scaling, and kept disabled 12103 * until sprite scaling is disabled." 12104 * 12105 * ILK/SNB DVSASCALE/Scaling Enable 12106 * "When in Self Refresh Big FIFO mode, scaling enable will be 12107 * masked off while Big FIFO mode is exiting." 12108 * 12109 * Despite the w/a only being listed for IVB we assume that 12110 * the ILK/SNB note has similar ramifications, hence we apply 12111 * the w/a on all three platforms. 
12112 * 12113 * With experimental results seems this is needed also for primary 12114 * plane, not only sprite plane. 12115 */ 12116 if (plane->id != PLANE_CURSOR && 12117 (IS_GEN_RANGE(dev_priv, 5, 6) || 12118 IS_IVYBRIDGE(dev_priv)) && 12119 (turn_on || (!needs_scaling(old_plane_state) && 12120 needs_scaling(plane_state)))) 12121 crtc_state->disable_lp_wm = true; 12122 12123 return 0; 12124 } 12125 12126 static bool encoders_cloneable(const struct intel_encoder *a, 12127 const struct intel_encoder *b) 12128 { 12129 /* masks could be asymmetric, so check both ways */ 12130 return a == b || (a->cloneable & (1 << b->type) && 12131 b->cloneable & (1 << a->type)); 12132 } 12133 12134 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 12135 struct intel_crtc *crtc, 12136 struct intel_encoder *encoder) 12137 { 12138 struct intel_encoder *source_encoder; 12139 struct drm_connector *connector; 12140 struct drm_connector_state *connector_state; 12141 int i; 12142 12143 for_each_new_connector_in_state(state, connector, connector_state, i) { 12144 if (connector_state->crtc != &crtc->base) 12145 continue; 12146 12147 source_encoder = 12148 to_intel_encoder(connector_state->best_encoder); 12149 if (!encoders_cloneable(encoder, source_encoder)) 12150 return false; 12151 } 12152 12153 return true; 12154 } 12155 12156 static int icl_add_linked_planes(struct intel_atomic_state *state) 12157 { 12158 struct intel_plane *plane, *linked; 12159 struct intel_plane_state *plane_state, *linked_plane_state; 12160 int i; 12161 12162 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12163 linked = plane_state->planar_linked_plane; 12164 12165 if (!linked) 12166 continue; 12167 12168 linked_plane_state = intel_atomic_get_plane_state(state, linked); 12169 if (IS_ERR(linked_plane_state)) 12170 return PTR_ERR(linked_plane_state); 12171 12172 WARN_ON(linked_plane_state->planar_linked_plane != plane); 12173 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave); 12174 } 12175 12176 return 0; 12177 } 12178 12179 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) 12180 { 12181 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 12182 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12183 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 12184 struct intel_plane *plane, *linked; 12185 struct intel_plane_state *plane_state; 12186 int i; 12187 12188 if (INTEL_GEN(dev_priv) < 11) 12189 return 0; 12190 12191 /* 12192 * Destroy all old plane links and make the slave plane invisible 12193 * in the crtc_state->active_planes mask. 
12194 */ 12195 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12196 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 12197 continue; 12198 12199 plane_state->planar_linked_plane = NULL; 12200 if (plane_state->planar_slave && !plane_state->uapi.visible) { 12201 crtc_state->active_planes &= ~BIT(plane->id); 12202 crtc_state->update_planes |= BIT(plane->id); 12203 } 12204 12205 plane_state->planar_slave = false; 12206 } 12207 12208 if (!crtc_state->nv12_planes) 12209 return 0; 12210 12211 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12212 struct intel_plane_state *linked_state = NULL; 12213 12214 if (plane->pipe != crtc->pipe || 12215 !(crtc_state->nv12_planes & BIT(plane->id))) 12216 continue; 12217 12218 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 12219 if (!icl_is_nv12_y_plane(linked->id)) 12220 continue; 12221 12222 if (crtc_state->active_planes & BIT(linked->id)) 12223 continue; 12224 12225 linked_state = intel_atomic_get_plane_state(state, linked); 12226 if (IS_ERR(linked_state)) 12227 return PTR_ERR(linked_state); 12228 12229 break; 12230 } 12231 12232 if (!linked_state) { 12233 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n", 12234 hweight8(crtc_state->nv12_planes)); 12235 12236 return -EINVAL; 12237 } 12238 12239 plane_state->planar_linked_plane = linked; 12240 12241 linked_state->planar_slave = true; 12242 linked_state->planar_linked_plane = plane; 12243 crtc_state->active_planes |= BIT(linked->id); 12244 crtc_state->update_planes |= BIT(linked->id); 12245 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); 12246 12247 /* Copy parameters to slave plane */ 12248 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 12249 linked_state->color_ctl = plane_state->color_ctl; 12250 memcpy(linked_state->color_plane, plane_state->color_plane, 12251 sizeof(linked_state->color_plane)); 12252 12253 intel_plane_copy_uapi_to_hw_state(linked_state, plane_state); 12254 linked_state->uapi.src = plane_state->uapi.src; 12255 linked_state->uapi.dst = plane_state->uapi.dst; 12256 12257 if (icl_is_hdr_plane(dev_priv, plane->id)) { 12258 if (linked->id == PLANE_SPRITE5) 12259 plane_state->cus_ctl |= PLANE_CUS_PLANE_7; 12260 else if (linked->id == PLANE_SPRITE4) 12261 plane_state->cus_ctl |= PLANE_CUS_PLANE_6; 12262 else 12263 MISSING_CASE(linked->id); 12264 } 12265 } 12266 12267 return 0; 12268 } 12269 12270 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 12271 { 12272 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 12273 struct intel_atomic_state *state = 12274 to_intel_atomic_state(new_crtc_state->uapi.state); 12275 const struct intel_crtc_state *old_crtc_state = 12276 intel_atomic_get_old_crtc_state(state, crtc); 12277 12278 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 12279 } 12280 12281 static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state) 12282 { 12283 struct drm_crtc *crtc = crtc_state->uapi.crtc; 12284 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 12285 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 12286 struct drm_connector *master_connector, *connector; 12287 struct drm_connector_state *connector_state; 12288 struct drm_connector_list_iter conn_iter; 12289 struct drm_crtc *master_crtc = NULL; 12290 struct drm_crtc_state *master_crtc_state; 12291 struct intel_crtc_state *master_pipe_config; 12292 int i, tile_group_id; 12293 
12294 if (INTEL_GEN(dev_priv) < 11) 12295 return 0; 12296 12297 /* 12298 * In case of tiled displays there could be one or more slaves but there is 12299 * only one master. Let's make the CRTC used by the connector corresponding 12300 * to the last horizontal and last vertical tile a master/genlock CRTC. 12301 * All the other CRTCs corresponding to other tiles of the same tile group 12302 * are the slave CRTCs and hold a pointer to their genlock CRTC. 12303 */ 12304 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 12305 if (connector_state->crtc != crtc) 12306 continue; 12307 if (!connector->has_tile) 12308 continue; 12309 if (crtc_state->hw.mode.hdisplay != connector->tile_h_size || 12310 crtc_state->hw.mode.vdisplay != connector->tile_v_size) 12311 return 0; 12312 if (connector->tile_h_loc == connector->num_h_tile - 1 && 12313 connector->tile_v_loc == connector->num_v_tile - 1) 12314 continue; 12315 crtc_state->sync_mode_slaves_mask = 0; 12316 tile_group_id = connector->tile_group->id; 12317 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 12318 drm_for_each_connector_iter(master_connector, &conn_iter) { 12319 struct drm_connector_state *master_conn_state = NULL; 12320 12321 if (!master_connector->has_tile) 12322 continue; 12323 if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 || 12324 master_connector->tile_v_loc != master_connector->num_v_tile - 1) 12325 continue; 12326 if (master_connector->tile_group->id != tile_group_id) 12327 continue; 12328 12329 master_conn_state = drm_atomic_get_connector_state(&state->base, 12330 master_connector); 12331 if (IS_ERR(master_conn_state)) { 12332 drm_connector_list_iter_end(&conn_iter); 12333 return PTR_ERR(master_conn_state); 12334 } 12335 if (master_conn_state->crtc) { 12336 master_crtc = master_conn_state->crtc; 12337 break; 12338 } 12339 } 12340 drm_connector_list_iter_end(&conn_iter); 12341 12342 if (!master_crtc) { 12343 DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n", 12344 connector_state->crtc->base.id); 12345 return -EINVAL; 12346 } 12347 12348 master_crtc_state = drm_atomic_get_crtc_state(&state->base, 12349 master_crtc); 12350 if (IS_ERR(master_crtc_state)) 12351 return PTR_ERR(master_crtc_state); 12352 12353 master_pipe_config = to_intel_crtc_state(master_crtc_state); 12354 crtc_state->master_transcoder = master_pipe_config->cpu_transcoder; 12355 master_pipe_config->sync_mode_slaves_mask |= 12356 BIT(crtc_state->cpu_transcoder); 12357 DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n", 12358 transcoder_name(crtc_state->master_transcoder), 12359 crtc_state->uapi.crtc->base.id, 12360 master_pipe_config->sync_mode_slaves_mask); 12361 } 12362 12363 return 0; 12364 } 12365 12366 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 12367 struct intel_crtc *crtc) 12368 { 12369 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12370 struct intel_crtc_state *crtc_state = 12371 intel_atomic_get_new_crtc_state(state, crtc); 12372 bool mode_changed = needs_modeset(crtc_state); 12373 int ret; 12374 12375 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && 12376 mode_changed && !crtc_state->hw.active) 12377 crtc_state->update_wm_post = true; 12378 12379 if (mode_changed && crtc_state->hw.enable && 12380 dev_priv->display.crtc_compute_clock && 12381 !WARN_ON(crtc_state->shared_dpll)) { 12382 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); 12383 if (ret) 12384 return ret; 12385 } 12386 12387
/* 12388 * May need to update pipe gamma enable bits 12389 * when C8 planes are getting enabled/disabled. 12390 */ 12391 if (c8_planes_changed(crtc_state)) 12392 crtc_state->uapi.color_mgmt_changed = true; 12393 12394 if (mode_changed || crtc_state->update_pipe || 12395 crtc_state->uapi.color_mgmt_changed) { 12396 ret = intel_color_check(crtc_state); 12397 if (ret) 12398 return ret; 12399 } 12400 12401 ret = 0; 12402 if (dev_priv->display.compute_pipe_wm) { 12403 ret = dev_priv->display.compute_pipe_wm(crtc_state); 12404 if (ret) { 12405 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); 12406 return ret; 12407 } 12408 } 12409 12410 if (dev_priv->display.compute_intermediate_wm) { 12411 if (WARN_ON(!dev_priv->display.compute_pipe_wm)) 12412 return 0; 12413 12414 /* 12415 * Calculate 'intermediate' watermarks that satisfy both the 12416 * old state and the new state. We can program these 12417 * immediately. 12418 */ 12419 ret = dev_priv->display.compute_intermediate_wm(crtc_state); 12420 if (ret) { 12421 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 12422 return ret; 12423 } 12424 } 12425 12426 if (INTEL_GEN(dev_priv) >= 9) { 12427 if (mode_changed || crtc_state->update_pipe) 12428 ret = skl_update_scaler_crtc(crtc_state); 12429 if (!ret) 12430 ret = intel_atomic_setup_scalers(dev_priv, crtc, 12431 crtc_state); 12432 } 12433 12434 if (HAS_IPS(dev_priv)) 12435 crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state); 12436 12437 return ret; 12438 } 12439 12440 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 12441 { 12442 struct intel_connector *connector; 12443 struct drm_connector_list_iter conn_iter; 12444 12445 drm_connector_list_iter_begin(dev, &conn_iter); 12446 for_each_intel_connector_iter(connector, &conn_iter) { 12447 if (connector->base.state->crtc) 12448 drm_connector_put(&connector->base); 12449 12450 if (connector->base.encoder) { 12451 connector->base.state->best_encoder = 12452 connector->base.encoder; 12453 connector->base.state->crtc = 12454 connector->base.encoder->crtc; 12455 12456 drm_connector_get(&connector->base); 12457 } else { 12458 connector->base.state->best_encoder = NULL; 12459 connector->base.state->crtc = NULL; 12460 } 12461 } 12462 drm_connector_list_iter_end(&conn_iter); 12463 } 12464 12465 static int 12466 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 12467 struct intel_crtc_state *pipe_config) 12468 { 12469 struct drm_connector *connector = conn_state->connector; 12470 const struct drm_display_info *info = &connector->display_info; 12471 int bpp; 12472 12473 switch (conn_state->max_bpc) { 12474 case 6 ... 7: 12475 bpp = 6 * 3; 12476 break; 12477 case 8 ... 9: 12478 bpp = 8 * 3; 12479 break; 12480 case 10 ... 
11: 12481 bpp = 10 * 3; 12482 break; 12483 case 12: 12484 bpp = 12 * 3; 12485 break; 12486 default: 12487 return -EINVAL; 12488 } 12489 12490 if (bpp < pipe_config->pipe_bpp) { 12491 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " 12492 "EDID bpp %d, requested bpp %d, max platform bpp %d\n", 12493 connector->base.id, connector->name, 12494 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc, 12495 pipe_config->pipe_bpp); 12496 12497 pipe_config->pipe_bpp = bpp; 12498 } 12499 12500 return 0; 12501 } 12502 12503 static int 12504 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 12505 struct intel_crtc_state *pipe_config) 12506 { 12507 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12508 struct drm_atomic_state *state = pipe_config->uapi.state; 12509 struct drm_connector *connector; 12510 struct drm_connector_state *connector_state; 12511 int bpp, i; 12512 12513 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 12514 IS_CHERRYVIEW(dev_priv))) 12515 bpp = 10*3; 12516 else if (INTEL_GEN(dev_priv) >= 5) 12517 bpp = 12*3; 12518 else 12519 bpp = 8*3; 12520 12521 pipe_config->pipe_bpp = bpp; 12522 12523 /* Clamp display bpp to connector max bpp */ 12524 for_each_new_connector_in_state(state, connector, connector_state, i) { 12525 int ret; 12526 12527 if (connector_state->crtc != &crtc->base) 12528 continue; 12529 12530 ret = compute_sink_pipe_bpp(connector_state, pipe_config); 12531 if (ret) 12532 return ret; 12533 } 12534 12535 return 0; 12536 } 12537 12538 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 12539 { 12540 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 12541 "type: 0x%x flags: 0x%x\n", 12542 mode->crtc_clock, 12543 mode->crtc_hdisplay, mode->crtc_hsync_start, 12544 mode->crtc_hsync_end, mode->crtc_htotal, 12545 mode->crtc_vdisplay, mode->crtc_vsync_start, 12546 mode->crtc_vsync_end, mode->crtc_vtotal, 12547 mode->type, mode->flags); 12548 } 12549 12550 static inline void 12551 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, 12552 const char *id, unsigned int lane_count, 12553 const struct intel_link_m_n *m_n) 12554 { 12555 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 12556 id, lane_count, 12557 m_n->gmch_m, m_n->gmch_n, 12558 m_n->link_m, m_n->link_n, m_n->tu); 12559 } 12560 12561 static void 12562 intel_dump_infoframe(struct drm_i915_private *dev_priv, 12563 const union hdmi_infoframe *frame) 12564 { 12565 if (!drm_debug_enabled(DRM_UT_KMS)) 12566 return; 12567 12568 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); 12569 } 12570 12571 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x 12572 12573 static const char * const output_type_str[] = { 12574 OUTPUT_TYPE(UNUSED), 12575 OUTPUT_TYPE(ANALOG), 12576 OUTPUT_TYPE(DVO), 12577 OUTPUT_TYPE(SDVO), 12578 OUTPUT_TYPE(LVDS), 12579 OUTPUT_TYPE(TVOUT), 12580 OUTPUT_TYPE(HDMI), 12581 OUTPUT_TYPE(DP), 12582 OUTPUT_TYPE(EDP), 12583 OUTPUT_TYPE(DSI), 12584 OUTPUT_TYPE(DDI), 12585 OUTPUT_TYPE(DP_MST), 12586 }; 12587 12588 #undef OUTPUT_TYPE 12589 12590 static void snprintf_output_types(char *buf, size_t len, 12591 unsigned int output_types) 12592 { 12593 char *str = buf; 12594 int i; 12595 12596 str[0] = '\0'; 12597 12598 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) { 12599 int r; 12600 12601 if ((output_types & BIT(i)) == 0) 12602 continue; 12603 12604 r = snprintf(str, len, "%s%s", 12605 str != buf ? 
"," : "", output_type_str[i]); 12606 if (r >= len) 12607 break; 12608 str += r; 12609 len -= r; 12610 12611 output_types &= ~BIT(i); 12612 } 12613 12614 WARN_ON_ONCE(output_types != 0); 12615 } 12616 12617 static const char * const output_format_str[] = { 12618 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", 12619 [INTEL_OUTPUT_FORMAT_RGB] = "RGB", 12620 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", 12621 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", 12622 }; 12623 12624 static const char *output_formats(enum intel_output_format format) 12625 { 12626 if (format >= ARRAY_SIZE(output_format_str)) 12627 format = INTEL_OUTPUT_FORMAT_INVALID; 12628 return output_format_str[format]; 12629 } 12630 12631 static void intel_dump_plane_state(const struct intel_plane_state *plane_state) 12632 { 12633 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 12634 const struct drm_framebuffer *fb = plane_state->hw.fb; 12635 struct drm_format_name_buf format_name; 12636 12637 if (!fb) { 12638 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n", 12639 plane->base.base.id, plane->base.name, 12640 yesno(plane_state->uapi.visible)); 12641 return; 12642 } 12643 12644 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n", 12645 plane->base.base.id, plane->base.name, 12646 fb->base.id, fb->width, fb->height, 12647 drm_get_format_name(fb->format->format, &format_name), 12648 yesno(plane_state->uapi.visible)); 12649 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n", 12650 plane_state->hw.rotation, plane_state->scaler_id); 12651 if (plane_state->uapi.visible) 12652 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", 12653 DRM_RECT_FP_ARG(&plane_state->uapi.src), 12654 DRM_RECT_ARG(&plane_state->uapi.dst)); 12655 } 12656 12657 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, 12658 struct intel_atomic_state *state, 12659 const char *context) 12660 { 12661 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 12662 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12663 const struct intel_plane_state *plane_state; 12664 struct intel_plane *plane; 12665 char buf[64]; 12666 int i; 12667 12668 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n", 12669 crtc->base.base.id, crtc->base.name, 12670 yesno(pipe_config->hw.enable), context); 12671 12672 if (!pipe_config->hw.enable) 12673 goto dump_planes; 12674 12675 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); 12676 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n", 12677 yesno(pipe_config->hw.active), 12678 buf, pipe_config->output_types, 12679 output_formats(pipe_config->output_format)); 12680 12681 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 12682 transcoder_name(pipe_config->cpu_transcoder), 12683 pipe_config->pipe_bpp, pipe_config->dither); 12684 12685 if (pipe_config->has_pch_encoder) 12686 intel_dump_m_n_config(pipe_config, "fdi", 12687 pipe_config->fdi_lanes, 12688 &pipe_config->fdi_m_n); 12689 12690 if (intel_crtc_has_dp_encoder(pipe_config)) { 12691 intel_dump_m_n_config(pipe_config, "dp m_n", 12692 pipe_config->lane_count, &pipe_config->dp_m_n); 12693 if (pipe_config->has_drrs) 12694 intel_dump_m_n_config(pipe_config, "dp m2_n2", 12695 pipe_config->lane_count, 12696 &pipe_config->dp_m2_n2); 12697 } 12698 12699 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", 12700 pipe_config->has_audio, pipe_config->has_infoframe, 12701 pipe_config->infoframes.enable); 12702 12703 if 
(pipe_config->infoframes.enable & 12704 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) 12705 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp); 12706 if (pipe_config->infoframes.enable & 12707 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) 12708 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); 12709 if (pipe_config->infoframes.enable & 12710 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) 12711 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd); 12712 if (pipe_config->infoframes.enable & 12713 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) 12714 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); 12715 12716 DRM_DEBUG_KMS("requested mode:\n"); 12717 drm_mode_debug_printmodeline(&pipe_config->hw.mode); 12718 DRM_DEBUG_KMS("adjusted mode:\n"); 12719 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode); 12720 intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode); 12721 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n", 12722 pipe_config->port_clock, 12723 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 12724 pipe_config->pixel_rate); 12725 12726 if (INTEL_GEN(dev_priv) >= 9) 12727 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 12728 crtc->num_scalers, 12729 pipe_config->scaler_state.scaler_users, 12730 pipe_config->scaler_state.scaler_id); 12731 12732 if (HAS_GMCH(dev_priv)) 12733 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 12734 pipe_config->gmch_pfit.control, 12735 pipe_config->gmch_pfit.pgm_ratios, 12736 pipe_config->gmch_pfit.lvds_border_bits); 12737 else 12738 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n", 12739 pipe_config->pch_pfit.pos, 12740 pipe_config->pch_pfit.size, 12741 enableddisabled(pipe_config->pch_pfit.enabled), 12742 yesno(pipe_config->pch_pfit.force_thru)); 12743 12744 DRM_DEBUG_KMS("ips: %i, double wide: %i\n", 12745 pipe_config->ips_enabled, pipe_config->double_wide); 12746 12747 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); 12748 12749 if (IS_CHERRYVIEW(dev_priv)) 12750 DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 12751 pipe_config->cgm_mode, pipe_config->gamma_mode, 12752 pipe_config->gamma_enable, pipe_config->csc_enable); 12753 else 12754 DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 12755 pipe_config->csc_mode, pipe_config->gamma_mode, 12756 pipe_config->gamma_enable, pipe_config->csc_enable); 12757 12758 dump_planes: 12759 if (!state) 12760 return; 12761 12762 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12763 if (plane->pipe == crtc->pipe) 12764 intel_dump_plane_state(plane_state); 12765 } 12766 } 12767 12768 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 12769 { 12770 struct drm_device *dev = state->base.dev; 12771 struct drm_connector *connector; 12772 struct drm_connector_list_iter conn_iter; 12773 unsigned int used_ports = 0; 12774 unsigned int used_mst_ports = 0; 12775 bool ret = true; 12776 12777 /* 12778 * We're going to peek into connector->state, 12779 * hence connection_mutex must be held. 12780 */ 12781 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 12782 12783 /* 12784 * Walk the connector list instead of the encoder 12785 * list to detect the problem on ddi platforms 12786 * where there's just one encoder per digital port. 
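* (On such platforms the HDMI and DP connectors of one port share a
* single encoder, so only a connector walk sees both users of the
* port.)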
12787 */ 12788 drm_connector_list_iter_begin(dev, &conn_iter); 12789 drm_for_each_connector_iter(connector, &conn_iter) { 12790 struct drm_connector_state *connector_state; 12791 struct intel_encoder *encoder; 12792 12793 connector_state = 12794 drm_atomic_get_new_connector_state(&state->base, 12795 connector); 12796 if (!connector_state) 12797 connector_state = connector->state; 12798 12799 if (!connector_state->best_encoder) 12800 continue; 12801 12802 encoder = to_intel_encoder(connector_state->best_encoder); 12803 12804 WARN_ON(!connector_state->crtc); 12805 12806 switch (encoder->type) { 12807 unsigned int port_mask; 12808 case INTEL_OUTPUT_DDI: 12809 if (WARN_ON(!HAS_DDI(to_i915(dev)))) 12810 break; 12811 /* else, fall through */ 12812 case INTEL_OUTPUT_DP: 12813 case INTEL_OUTPUT_HDMI: 12814 case INTEL_OUTPUT_EDP: 12815 port_mask = 1 << encoder->port; 12816 12817 /* the same port mustn't appear more than once */ 12818 if (used_ports & port_mask) 12819 ret = false; 12820 12821 used_ports |= port_mask; 12822 break; 12823 case INTEL_OUTPUT_DP_MST: 12824 used_mst_ports |= 12825 1 << encoder->port; 12826 break; 12827 default: 12828 break; 12829 } 12830 } 12831 drm_connector_list_iter_end(&conn_iter); 12832 12833 /* can't mix MST and SST/HDMI on the same port */ 12834 if (used_ports & used_mst_ports) 12835 return false; 12836 12837 return ret; 12838 } 12839 12840 static void 12841 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state) 12842 { 12843 intel_crtc_copy_color_blobs(crtc_state); 12844 } 12845 12846 static void 12847 intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state) 12848 { 12849 crtc_state->hw.enable = crtc_state->uapi.enable; 12850 crtc_state->hw.active = crtc_state->uapi.active; 12851 crtc_state->hw.mode = crtc_state->uapi.mode; 12852 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode; 12853 intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state); 12854 } 12855 12856 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state) 12857 { 12858 crtc_state->uapi.enable = crtc_state->hw.enable; 12859 crtc_state->uapi.active = crtc_state->hw.active; 12860 WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0); 12861 12862 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode; 12863 12864 /* copy color blobs to uapi */ 12865 drm_property_replace_blob(&crtc_state->uapi.degamma_lut, 12866 crtc_state->hw.degamma_lut); 12867 drm_property_replace_blob(&crtc_state->uapi.gamma_lut, 12868 crtc_state->hw.gamma_lut); 12869 drm_property_replace_blob(&crtc_state->uapi.ctm, 12870 crtc_state->hw.ctm); 12871 } 12872 12873 static int 12874 intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state) 12875 { 12876 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 12877 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12878 struct intel_crtc_state *saved_state; 12879 12880 saved_state = intel_crtc_state_alloc(crtc); 12881 if (!saved_state) 12882 return -ENOMEM; 12883 12884 /* free the old crtc_state->hw members */ 12885 intel_crtc_free_hw_state(crtc_state); 12886 12887 /* FIXME: before the switch to atomic started, a new pipe_config was 12888 * kzalloc'd. Code that depends on any field being zero should be 12889 * fixed, so that the crtc_state can be safely duplicated. For now, 12890 * only fields that are known to not cause problems are preserved.
*/ 12891 12892 saved_state->uapi = crtc_state->uapi; 12893 saved_state->scaler_state = crtc_state->scaler_state; 12894 saved_state->shared_dpll = crtc_state->shared_dpll; 12895 saved_state->dpll_hw_state = crtc_state->dpll_hw_state; 12896 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, 12897 sizeof(saved_state->icl_port_dplls)); 12898 saved_state->crc_enabled = crtc_state->crc_enabled; 12899 if (IS_G4X(dev_priv) || 12900 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 12901 saved_state->wm = crtc_state->wm; 12902 /* 12903 * Save the slave bitmask which gets filled for master crtc state during 12904 * the slave atomic check call. 12905 */ 12906 if (is_trans_port_sync_master(crtc_state)) 12907 saved_state->sync_mode_slaves_mask = 12908 crtc_state->sync_mode_slaves_mask; 12909 12910 memcpy(crtc_state, saved_state, sizeof(*crtc_state)); 12911 kfree(saved_state); 12912 12913 intel_crtc_copy_uapi_to_hw_state(crtc_state); 12914 12915 return 0; 12916 } 12917 12918 static int 12919 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) 12920 { 12921 struct drm_crtc *crtc = pipe_config->uapi.crtc; 12922 struct drm_atomic_state *state = pipe_config->uapi.state; 12923 struct intel_encoder *encoder; 12924 struct drm_connector *connector; 12925 struct drm_connector_state *connector_state; 12926 int base_bpp, ret; 12927 int i; 12928 bool retry = true; 12929 12930 pipe_config->cpu_transcoder = 12931 (enum transcoder) to_intel_crtc(crtc)->pipe; 12932 12933 /* 12934 * Sanitize sync polarity flags based on requested ones. If neither 12935 * positive nor negative polarity is requested, treat this as meaning 12936 * negative polarity. 12937 */ 12938 if (!(pipe_config->hw.adjusted_mode.flags & 12939 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 12940 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 12941 12942 if (!(pipe_config->hw.adjusted_mode.flags & 12943 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 12944 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 12945 12946 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc), 12947 pipe_config); 12948 if (ret) 12949 return ret; 12950 12951 base_bpp = pipe_config->pipe_bpp; 12952 12953 /* 12954 * Determine the real pipe dimensions. Note that stereo modes can 12955 * increase the actual pipe size due to the frame doubling and 12956 * insertion of additional space for blanks between the frames. This 12957 * is stored in the crtc timings. We use the requested mode to do this 12958 * computation to clearly distinguish it from the adjusted mode, which 12959 * can be changed by the connectors in the below retry loop. 12960 */ 12961 drm_mode_get_hv_timing(&pipe_config->hw.mode, 12962 &pipe_config->pipe_src_w, 12963 &pipe_config->pipe_src_h); 12964 12965 for_each_new_connector_in_state(state, connector, connector_state, i) { 12966 if (connector_state->crtc != crtc) 12967 continue; 12968 12969 encoder = to_intel_encoder(connector_state->best_encoder); 12970 12971 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { 12972 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 12973 return -EINVAL; 12974 } 12975 12976 /* 12977 * Determine output_types before calling the .compute_config() 12978 * hooks so that the hooks can use this information safely.
12979 */ 12980 if (encoder->compute_output_type) 12981 pipe_config->output_types |= 12982 BIT(encoder->compute_output_type(encoder, pipe_config, 12983 connector_state)); 12984 else 12985 pipe_config->output_types |= BIT(encoder->type); 12986 } 12987 12988 encoder_retry: 12989 /* Ensure the port clock defaults are reset when retrying. */ 12990 pipe_config->port_clock = 0; 12991 pipe_config->pixel_multiplier = 1; 12992 12993 /* Fill in default crtc timings, allow encoders to overwrite them. */ 12994 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode, 12995 CRTC_STEREO_DOUBLE); 12996 12997 /* Set the crtc_state defaults for trans_port_sync */ 12998 pipe_config->master_transcoder = INVALID_TRANSCODER; 12999 ret = icl_add_sync_mode_crtcs(pipe_config); 13000 if (ret) { 13001 DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n", 13002 ret); 13003 return ret; 13004 } 13005 13006 /* Pass our mode to the connectors and the CRTC to give them a chance to 13007 * adjust it according to limitations or connector properties, and also 13008 * a chance to reject the mode entirely. 13009 */ 13010 for_each_new_connector_in_state(state, connector, connector_state, i) { 13011 if (connector_state->crtc != crtc) 13012 continue; 13013 13014 encoder = to_intel_encoder(connector_state->best_encoder); 13015 ret = encoder->compute_config(encoder, pipe_config, 13016 connector_state); 13017 if (ret < 0) { 13018 if (ret != -EDEADLK) 13019 DRM_DEBUG_KMS("Encoder config failure: %d\n", 13020 ret); 13021 return ret; 13022 } 13023 } 13024 13025 /* Set default port clock if not overwritten by the encoder. Needs to be 13026 * done afterwards in case the encoder adjusts the mode. */ 13027 if (!pipe_config->port_clock) 13028 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock 13029 * pipe_config->pixel_multiplier; 13030 13031 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 13032 if (ret == -EDEADLK) 13033 return ret; 13034 if (ret < 0) { 13035 DRM_DEBUG_KMS("CRTC fixup failed\n"); 13036 return ret; 13037 } 13038 13039 if (ret == RETRY) { 13040 if (WARN(!retry, "loop in pipe configuration computation\n")) 13041 return -EINVAL; 13042 13043 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); 13044 retry = false; 13045 goto encoder_retry; 13046 } 13047 13048 /* Dithering seems to not pass through bits correctly when it should, so 13049 * only enable it on 6bpc panels and when it's not a compliance 13050 * test requesting a 6bpc video pattern.
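 * (Example with the values checked below: pipe_bpp == 6*3 == 18 means a
 * 6bpc panel, so dither is set unless dither_force_disable was requested
 * by such a compliance test.)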
13051 */ 13052 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) && 13053 !pipe_config->dither_force_disable; 13054 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 13055 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 13056 13057 /* 13058 * Make drm_calc_timestamping_constants in 13059 * drm_atomic_helper_update_legacy_modeset_state() happy 13060 */ 13061 pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode; 13062 13063 return 0; 13064 } 13065 13066 bool intel_fuzzy_clock_check(int clock1, int clock2) 13067 { 13068 int diff; 13069 13070 if (clock1 == clock2) 13071 return true; 13072 13073 if (!clock1 || !clock2) 13074 return false; 13075 13076 diff = abs(clock1 - clock2); 13077 13078 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105) 13079 return true; 13080 13081 return false; 13082 } 13083 13084 static bool 13085 intel_compare_m_n(unsigned int m, unsigned int n, 13086 unsigned int m2, unsigned int n2, 13087 bool exact) 13088 { 13089 if (m == m2 && n == n2) 13090 return true; 13091 13092 if (exact || !m || !n || !m2 || !n2) 13093 return false; 13094 13095 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 13096 13097 if (n > n2) { 13098 while (n > n2) { 13099 m2 <<= 1; 13100 n2 <<= 1; 13101 } 13102 } else if (n < n2) { 13103 while (n < n2) { 13104 m <<= 1; 13105 n <<= 1; 13106 } 13107 } 13108 13109 if (n != n2) 13110 return false; 13111 13112 return intel_fuzzy_clock_check(m, m2); 13113 } 13114 13115 static bool 13116 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 13117 const struct intel_link_m_n *m2_n2, 13118 bool exact) 13119 { 13120 return m_n->tu == m2_n2->tu && 13121 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 13122 m2_n2->gmch_m, m2_n2->gmch_n, exact) && 13123 intel_compare_m_n(m_n->link_m, m_n->link_n, 13124 m2_n2->link_m, m2_n2->link_n, exact); 13125 } 13126 13127 static bool 13128 intel_compare_infoframe(const union hdmi_infoframe *a, 13129 const union hdmi_infoframe *b) 13130 { 13131 return memcmp(a, b, sizeof(*a)) == 0; 13132 } 13133 13134 static void 13135 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, 13136 bool fastset, const char *name, 13137 const union hdmi_infoframe *a, 13138 const union hdmi_infoframe *b) 13139 { 13140 if (fastset) { 13141 if (!drm_debug_enabled(DRM_UT_KMS)) 13142 return; 13143 13144 DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name); 13145 DRM_DEBUG_KMS("expected:\n"); 13146 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); 13147 DRM_DEBUG_KMS("found:\n"); 13148 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); 13149 } else { 13150 DRM_ERROR("mismatch in %s infoframe\n", name); 13151 DRM_ERROR("expected:\n"); 13152 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); 13153 DRM_ERROR("found:\n"); 13154 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); 13155 } 13156 } 13157 13158 static void __printf(4, 5) 13159 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, 13160 const char *name, const char *format, ...)
13161 { 13162 struct va_format vaf; 13163 va_list args; 13164 13165 va_start(args, format); 13166 vaf.fmt = format; 13167 vaf.va = &args; 13168 13169 if (fastset) 13170 DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n", 13171 crtc->base.base.id, crtc->base.name, name, &vaf); 13172 else 13173 DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n", 13174 crtc->base.base.id, crtc->base.name, name, &vaf); 13175 13176 va_end(args); 13177 } 13178 13179 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 13180 { 13181 if (i915_modparams.fastboot != -1) 13182 return i915_modparams.fastboot; 13183 13184 /* Enable fastboot by default on Skylake and newer */ 13185 if (INTEL_GEN(dev_priv) >= 9) 13186 return true; 13187 13188 /* Enable fastboot by default on VLV and CHV */ 13189 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13190 return true; 13191 13192 /* Disabled by default on all others */ 13193 return false; 13194 } 13195 13196 static bool 13197 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 13198 const struct intel_crtc_state *pipe_config, 13199 bool fastset) 13200 { 13201 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); 13202 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 13203 bool ret = true; 13204 u32 bp_gamma = 0; 13205 bool fixup_inherited = fastset && 13206 (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) && 13207 !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED); 13208 13209 if (fixup_inherited && !fastboot_enabled(dev_priv)) { 13210 DRM_DEBUG_KMS("initial modeset and fastboot not set\n"); 13211 ret = false; 13212 } 13213 13214 #define PIPE_CONF_CHECK_X(name) do { \ 13215 if (current_config->name != pipe_config->name) { \ 13216 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13217 "(expected 0x%08x, found 0x%08x)", \ 13218 current_config->name, \ 13219 pipe_config->name); \ 13220 ret = false; \ 13221 } \ 13222 } while (0) 13223 13224 #define PIPE_CONF_CHECK_I(name) do { \ 13225 if (current_config->name != pipe_config->name) { \ 13226 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13227 "(expected %i, found %i)", \ 13228 current_config->name, \ 13229 pipe_config->name); \ 13230 ret = false; \ 13231 } \ 13232 } while (0) 13233 13234 #define PIPE_CONF_CHECK_BOOL(name) do { \ 13235 if (current_config->name != pipe_config->name) { \ 13236 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13237 "(expected %s, found %s)", \ 13238 yesno(current_config->name), \ 13239 yesno(pipe_config->name)); \ 13240 ret = false; \ 13241 } \ 13242 } while (0) 13243 13244 /* 13245 * Checks state where we only read out the enabling, but not the entire 13246 * state itself (like full infoframes or ELD for audio). These states 13247 * require a full modeset on bootup to fix up. 
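 * The macro below therefore only does a straight bool compare when we
 * are not fixing up inherited state, or when both sides agree the
 * feature is off; any other combination is reported and forces a
 * modeset.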
13248 */ 13249 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \ 13250 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ 13251 PIPE_CONF_CHECK_BOOL(name); \ 13252 } else { \ 13253 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13254 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \ 13255 yesno(current_config->name), \ 13256 yesno(pipe_config->name)); \ 13257 ret = false; \ 13258 } \ 13259 } while (0) 13260 13261 #define PIPE_CONF_CHECK_P(name) do { \ 13262 if (current_config->name != pipe_config->name) { \ 13263 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13264 "(expected %p, found %p)", \ 13265 current_config->name, \ 13266 pipe_config->name); \ 13267 ret = false; \ 13268 } \ 13269 } while (0) 13270 13271 #define PIPE_CONF_CHECK_M_N(name) do { \ 13272 if (!intel_compare_link_m_n(&current_config->name, \ 13273 &pipe_config->name,\ 13274 !fastset)) { \ 13275 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13276 "(expected tu %i gmch %i/%i link %i/%i, " \ 13277 "found tu %i, gmch %i/%i link %i/%i)", \ 13278 current_config->name.tu, \ 13279 current_config->name.gmch_m, \ 13280 current_config->name.gmch_n, \ 13281 current_config->name.link_m, \ 13282 current_config->name.link_n, \ 13283 pipe_config->name.tu, \ 13284 pipe_config->name.gmch_m, \ 13285 pipe_config->name.gmch_n, \ 13286 pipe_config->name.link_m, \ 13287 pipe_config->name.link_n); \ 13288 ret = false; \ 13289 } \ 13290 } while (0) 13291 13292 /* This is required for BDW+ where there is only one set of registers for 13293 * switching between high and low RR. 13294 * This macro can be used whenever a comparison has to be made between one 13295 * hw state and multiple sw state variables. 13296 */ 13297 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \ 13298 if (!intel_compare_link_m_n(&current_config->name, \ 13299 &pipe_config->name, !fastset) && \ 13300 !intel_compare_link_m_n(&current_config->alt_name, \ 13301 &pipe_config->name, !fastset)) { \ 13302 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13303 "(expected tu %i gmch %i/%i link %i/%i, " \ 13304 "or tu %i gmch %i/%i link %i/%i, " \ 13305 "found tu %i, gmch %i/%i link %i/%i)", \ 13306 current_config->name.tu, \ 13307 current_config->name.gmch_m, \ 13308 current_config->name.gmch_n, \ 13309 current_config->name.link_m, \ 13310 current_config->name.link_n, \ 13311 current_config->alt_name.tu, \ 13312 current_config->alt_name.gmch_m, \ 13313 current_config->alt_name.gmch_n, \ 13314 current_config->alt_name.link_m, \ 13315 current_config->alt_name.link_n, \ 13316 pipe_config->name.tu, \ 13317 pipe_config->name.gmch_m, \ 13318 pipe_config->name.gmch_n, \ 13319 pipe_config->name.link_m, \ 13320 pipe_config->name.link_n); \ 13321 ret = false; \ 13322 } \ 13323 } while (0) 13324 13325 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 13326 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 13327 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13328 "(%x) (expected %i, found %i)", \ 13329 (mask), \ 13330 current_config->name & (mask), \ 13331 pipe_config->name & (mask)); \ 13332 ret = false; \ 13333 } \ 13334 } while (0) 13335 13336 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \ 13337 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 13338 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13339 "(expected %i, found %i)", \ 13340 current_config->name, \ 13341 pipe_config->name); \ 13342 ret = false; \ 13343 } \ 13344 } while (0) 13345
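/*
 * A worked example of intel_fuzzy_clock_check(), which backs
 * PIPE_CONF_CHECK_CLOCK_FUZZY above (hypothetical numbers):
 * clock1 = 100000 and clock2 = 103000 give diff = 3000, and
 * (3000 + 203000) * 100 / 203000 = 101 < 105, so the two clocks compare
 * as equal. The check passes as long as diff stays below 5% of
 * clock1 + clock2, i.e. roughly 10% of the average clock.
 */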
13346 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ 13347 if (!intel_compare_infoframe(&current_config->infoframes.name, \ 13348 &pipe_config->infoframes.name)) { \ 13349 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \ 13350 &current_config->infoframes.name, \ 13351 &pipe_config->infoframes.name); \ 13352 ret = false; \ 13353 } \ 13354 } while (0) 13355 13356 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \ 13357 if (current_config->name1 != pipe_config->name1) { \ 13358 pipe_config_mismatch(fastset, crtc, __stringify(name1), \ 13359 "(expected %i, found %i, won't compare lut values)", \ 13360 current_config->name1, \ 13361 pipe_config->name1); \ 13362 ret = false;\ 13363 } else { \ 13364 if (!intel_color_lut_equal(current_config->name2, \ 13365 pipe_config->name2, pipe_config->name1, \ 13366 bit_precision)) { \ 13367 pipe_config_mismatch(fastset, crtc, __stringify(name2), \ 13368 "hw_state doesn't match sw_state"); \ 13369 ret = false; \ 13370 } \ 13371 } \ 13372 } while (0) 13373 13374 #define PIPE_CONF_QUIRK(quirk) \ 13375 ((current_config->quirks | pipe_config->quirks) & (quirk)) 13376 13377 PIPE_CONF_CHECK_I(cpu_transcoder); 13378 13379 PIPE_CONF_CHECK_BOOL(has_pch_encoder); 13380 PIPE_CONF_CHECK_I(fdi_lanes); 13381 PIPE_CONF_CHECK_M_N(fdi_m_n); 13382 13383 PIPE_CONF_CHECK_I(lane_count); 13384 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 13385 13386 if (INTEL_GEN(dev_priv) < 8) { 13387 PIPE_CONF_CHECK_M_N(dp_m_n); 13388 13389 if (current_config->has_drrs) 13390 PIPE_CONF_CHECK_M_N(dp_m2_n2); 13391 } else 13392 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 13393 13394 PIPE_CONF_CHECK_X(output_types); 13395 13396 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); 13397 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); 13398 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); 13399 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); 13400 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); 13401 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); 13402 13403 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); 13404 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); 13405 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); 13406 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); 13407 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); 13408 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); 13409 13410 PIPE_CONF_CHECK_I(pixel_multiplier); 13411 PIPE_CONF_CHECK_I(output_format); 13412 PIPE_CONF_CHECK_I(dc3co_exitline); 13413 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 13414 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 13415 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13416 PIPE_CONF_CHECK_BOOL(limited_color_range); 13417 13418 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 13419 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 13420 PIPE_CONF_CHECK_BOOL(has_infoframe); 13421 PIPE_CONF_CHECK_BOOL(fec_enable); 13422 13423 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 13424 13425 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13426 DRM_MODE_FLAG_INTERLACE); 13427 13428 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 13429 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13430 DRM_MODE_FLAG_PHSYNC); 13431 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13432 DRM_MODE_FLAG_NHSYNC); 13433 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13434 DRM_MODE_FLAG_PVSYNC); 13435 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13436 DRM_MODE_FLAG_NVSYNC); 13437 } 13438 13439 PIPE_CONF_CHECK_X(gmch_pfit.control); 13440 /* pfit ratios are autocomputed by the hw on
gen4+ */ 13441 if (INTEL_GEN(dev_priv) < 4) 13442 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 13443 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 13444 13445 /* 13446 * Changing the EDP transcoder input mux 13447 * (A_ONOFF vs. A_ON) requires a full modeset. 13448 */ 13449 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 13450 13451 if (!fastset) { 13452 PIPE_CONF_CHECK_I(pipe_src_w); 13453 PIPE_CONF_CHECK_I(pipe_src_h); 13454 13455 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 13456 if (current_config->pch_pfit.enabled) { 13457 PIPE_CONF_CHECK_X(pch_pfit.pos); 13458 PIPE_CONF_CHECK_X(pch_pfit.size); 13459 } 13460 13461 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 13462 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 13463 13464 PIPE_CONF_CHECK_X(gamma_mode); 13465 if (IS_CHERRYVIEW(dev_priv)) 13466 PIPE_CONF_CHECK_X(cgm_mode); 13467 else 13468 PIPE_CONF_CHECK_X(csc_mode); 13469 PIPE_CONF_CHECK_BOOL(gamma_enable); 13470 PIPE_CONF_CHECK_BOOL(csc_enable); 13471 13472 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); 13473 if (bp_gamma) 13474 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma); 13475 13476 } 13477 13478 PIPE_CONF_CHECK_BOOL(double_wide); 13479 13480 PIPE_CONF_CHECK_P(shared_dpll); 13481 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 13482 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 13483 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 13484 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 13485 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 13486 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 13487 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 13488 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 13489 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 13490 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 13491 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 13492 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 13493 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 13494 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 13495 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 13496 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 13497 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 13498 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 13499 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 13500 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 13501 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 13502 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 13503 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 13504 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 13505 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 13506 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 13507 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 13508 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 13509 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 13510 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 13511 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 13512 13513 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 13514 PIPE_CONF_CHECK_X(dsi_pll.div); 13515 13516 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 13517 PIPE_CONF_CHECK_I(pipe_bpp); 13518 13519 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); 13520 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 13521 13522 PIPE_CONF_CHECK_I(min_voltage_level); 13523 13524 PIPE_CONF_CHECK_X(infoframes.enable); 13525 PIPE_CONF_CHECK_X(infoframes.gcp); 13526 PIPE_CONF_CHECK_INFOFRAME(avi); 13527 PIPE_CONF_CHECK_INFOFRAME(spd); 13528 PIPE_CONF_CHECK_INFOFRAME(hdmi); 13529 PIPE_CONF_CHECK_INFOFRAME(drm); 13530 13531 PIPE_CONF_CHECK_I(sync_mode_slaves_mask); 13532 PIPE_CONF_CHECK_I(master_transcoder); 13533 13534 PIPE_CONF_CHECK_I(dsc.compression_enable); 13535 PIPE_CONF_CHECK_I(dsc.dsc_split); 13536 PIPE_CONF_CHECK_I(dsc.compressed_bpp); 
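/*
 * For reference, each PIPE_CONF_CHECK_* above expands to a guarded
 * compare rather than an early return; e.g. PIPE_CONF_CHECK_I(lane_count)
 * becomes roughly:
 *
 *	if (current_config->lane_count != pipe_config->lane_count) {
 *		pipe_config_mismatch(fastset, crtc, "lane_count",
 *				     "(expected %i, found %i)", ...);
 *		ret = false;
 *	}
 *
 * so one call logs every mismatching field before the function fails.
 */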
13537 13538 #undef PIPE_CONF_CHECK_X 13539 #undef PIPE_CONF_CHECK_I 13540 #undef PIPE_CONF_CHECK_BOOL 13541 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 13542 #undef PIPE_CONF_CHECK_P 13543 #undef PIPE_CONF_CHECK_FLAGS 13544 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 13545 #undef PIPE_CONF_CHECK_COLOR_LUT 13546 #undef PIPE_CONF_QUIRK 13547 13548 return ret; 13549 } 13550 13551 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 13552 const struct intel_crtc_state *pipe_config) 13553 { 13554 if (pipe_config->has_pch_encoder) { 13555 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 13556 &pipe_config->fdi_m_n); 13557 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock; 13558 13559 /* 13560 * FDI already provided one idea for the dotclock. 13561 * Yell if the encoder disagrees. 13562 */ 13563 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock), 13564 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 13565 fdi_dotclock, dotclock); 13566 } 13567 } 13568 13569 static void verify_wm_state(struct intel_crtc *crtc, 13570 struct intel_crtc_state *new_crtc_state) 13571 { 13572 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13573 struct skl_hw_state { 13574 struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; 13575 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; 13576 struct skl_ddb_allocation ddb; 13577 struct skl_pipe_wm wm; 13578 } *hw; 13579 struct skl_ddb_allocation *sw_ddb; 13580 struct skl_pipe_wm *sw_wm; 13581 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 13582 const enum pipe pipe = crtc->pipe; 13583 int plane, level, max_level = ilk_wm_max_level(dev_priv); 13584 13585 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active) 13586 return; 13587 13588 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 13589 if (!hw) 13590 return; 13591 13592 skl_pipe_wm_get_hw_state(crtc, &hw->wm); 13593 sw_wm = &new_crtc_state->wm.skl.optimal; 13594 13595 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); 13596 13597 skl_ddb_get_hw_state(dev_priv, &hw->ddb); 13598 sw_ddb = &dev_priv->wm.skl_hw.ddb; 13599 13600 if (INTEL_GEN(dev_priv) >= 11 && 13601 hw->ddb.enabled_slices != sw_ddb->enabled_slices) 13602 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n", 13603 sw_ddb->enabled_slices, 13604 hw->ddb.enabled_slices); 13605 13606 /* planes */ 13607 for_each_universal_plane(dev_priv, pipe, plane) { 13608 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 13609 13610 hw_plane_wm = &hw->wm.planes[plane]; 13611 sw_plane_wm = &sw_wm->planes[plane]; 13612 13613 /* Watermarks */ 13614 for (level = 0; level <= max_level; level++) { 13615 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 13616 &sw_plane_wm->wm[level])) 13617 continue; 13618 13619 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13620 pipe_name(pipe), plane + 1, level, 13621 sw_plane_wm->wm[level].plane_en, 13622 sw_plane_wm->wm[level].plane_res_b, 13623 sw_plane_wm->wm[level].plane_res_l, 13624 hw_plane_wm->wm[level].plane_en, 13625 hw_plane_wm->wm[level].plane_res_b, 13626 hw_plane_wm->wm[level].plane_res_l); 13627 } 13628 13629 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 13630 &sw_plane_wm->trans_wm)) { 13631 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13632 pipe_name(pipe), plane + 1, 13633 sw_plane_wm->trans_wm.plane_en, 13634 sw_plane_wm->trans_wm.plane_res_b, 13635 sw_plane_wm->trans_wm.plane_res_l, 13636 hw_plane_wm->trans_wm.plane_en, 13637 
hw_plane_wm->trans_wm.plane_res_b, 13638 hw_plane_wm->trans_wm.plane_res_l); 13639 } 13640 13641 /* DDB */ 13642 hw_ddb_entry = &hw->ddb_y[plane]; 13643 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane]; 13644 13645 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 13646 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", 13647 pipe_name(pipe), plane + 1, 13648 sw_ddb_entry->start, sw_ddb_entry->end, 13649 hw_ddb_entry->start, hw_ddb_entry->end); 13650 } 13651 } 13652 13653 /* 13654 * cursor 13655 * If the cursor plane isn't active, we may not have updated its ddb 13656 * allocation. In that case, since the ddb allocation will be updated 13657 * once the plane becomes visible, we can skip this check 13658 */ 13659 if (1) { 13660 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 13661 13662 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR]; 13663 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; 13664 13665 /* Watermarks */ 13666 for (level = 0; level <= max_level; level++) { 13667 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 13668 &sw_plane_wm->wm[level])) 13669 continue; 13670 13671 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13672 pipe_name(pipe), level, 13673 sw_plane_wm->wm[level].plane_en, 13674 sw_plane_wm->wm[level].plane_res_b, 13675 sw_plane_wm->wm[level].plane_res_l, 13676 hw_plane_wm->wm[level].plane_en, 13677 hw_plane_wm->wm[level].plane_res_b, 13678 hw_plane_wm->wm[level].plane_res_l); 13679 } 13680 13681 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 13682 &sw_plane_wm->trans_wm)) { 13683 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13684 pipe_name(pipe), 13685 sw_plane_wm->trans_wm.plane_en, 13686 sw_plane_wm->trans_wm.plane_res_b, 13687 sw_plane_wm->trans_wm.plane_res_l, 13688 hw_plane_wm->trans_wm.plane_en, 13689 hw_plane_wm->trans_wm.plane_res_b, 13690 hw_plane_wm->trans_wm.plane_res_l); 13691 } 13692 13693 /* DDB */ 13694 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; 13695 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; 13696 13697 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 13698 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", 13699 pipe_name(pipe), 13700 sw_ddb_entry->start, sw_ddb_entry->end, 13701 hw_ddb_entry->start, hw_ddb_entry->end); 13702 } 13703 } 13704 13705 kfree(hw); 13706 } 13707 13708 static void 13709 verify_connector_state(struct intel_atomic_state *state, 13710 struct intel_crtc *crtc) 13711 { 13712 struct drm_connector *connector; 13713 struct drm_connector_state *new_conn_state; 13714 int i; 13715 13716 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { 13717 struct drm_encoder *encoder = connector->encoder; 13718 struct intel_crtc_state *crtc_state = NULL; 13719 13720 if (new_conn_state->crtc != &crtc->base) 13721 continue; 13722 13723 if (crtc) 13724 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 13725 13726 intel_connector_verify_state(crtc_state, new_conn_state); 13727 13728 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 13729 "connector's atomic encoder doesn't match legacy encoder\n"); 13730 } 13731 } 13732 13733 static void 13734 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) 13735 { 13736 struct intel_encoder *encoder; 13737 struct drm_connector *connector; 13738 struct drm_connector_state *old_conn_state, *new_conn_state; 13739 int i; 13740
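	/*
	 * Cross-check each encoder against the connector states: an encoder
	 * should have a crtc exactly when some connector still points at it,
	 * and an encoder with no crtc must also read back as disabled from
	 * the hardware.
	 */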
13741 for_each_intel_encoder(&dev_priv->drm, encoder) { 13742 bool enabled = false, found = false; 13743 enum pipe pipe; 13744 13745 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 13746 encoder->base.base.id, 13747 encoder->base.name); 13748 13749 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, 13750 new_conn_state, i) { 13751 if (old_conn_state->best_encoder == &encoder->base) 13752 found = true; 13753 13754 if (new_conn_state->best_encoder != &encoder->base) 13755 continue; 13756 found = enabled = true; 13757 13758 I915_STATE_WARN(new_conn_state->crtc != 13759 encoder->base.crtc, 13760 "connector's crtc doesn't match encoder crtc\n"); 13761 } 13762 13763 if (!found) 13764 continue; 13765 13766 I915_STATE_WARN(!!encoder->base.crtc != enabled, 13767 "encoder's enabled state mismatch " 13768 "(expected %i, found %i)\n", 13769 !!encoder->base.crtc, enabled); 13770 13771 if (!encoder->base.crtc) { 13772 bool active; 13773 13774 active = encoder->get_hw_state(encoder, &pipe); 13775 I915_STATE_WARN(active, 13776 "encoder detached but still enabled on pipe %c.\n", 13777 pipe_name(pipe)); 13778 } 13779 } 13780 } 13781 13782 static void 13783 verify_crtc_state(struct intel_crtc *crtc, 13784 struct intel_crtc_state *old_crtc_state, 13785 struct intel_crtc_state *new_crtc_state) 13786 { 13787 struct drm_device *dev = crtc->base.dev; 13788 struct drm_i915_private *dev_priv = to_i915(dev); 13789 struct intel_encoder *encoder; 13790 struct intel_crtc_state *pipe_config = old_crtc_state; 13791 struct drm_atomic_state *state = old_crtc_state->uapi.state; 13792 bool active; 13793 13794 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); 13795 intel_crtc_free_hw_state(old_crtc_state); 13796 intel_crtc_state_reset(old_crtc_state, crtc); 13797 old_crtc_state->uapi.state = state; 13798 13799 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); 13800 13801 active = dev_priv->display.get_pipe_config(crtc, pipe_config); 13802 13803 /* we keep both pipes enabled on 830 */ 13804 if (IS_I830(dev_priv)) 13805 active = new_crtc_state->hw.active; 13806 13807 I915_STATE_WARN(new_crtc_state->hw.active != active, 13808 "crtc active state doesn't match with hw state " 13809 "(expected %i, found %i)\n", 13810 new_crtc_state->hw.active, active); 13811 13812 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active, 13813 "transitional active state does not match atomic hw state " 13814 "(expected %i, found %i)\n", 13815 new_crtc_state->hw.active, crtc->active); 13816 13817 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 13818 enum pipe pipe; 13819 13820 active = encoder->get_hw_state(encoder, &pipe); 13821 I915_STATE_WARN(active != new_crtc_state->hw.active, 13822 "[ENCODER:%i] active %i with crtc active %i\n", 13823 encoder->base.base.id, active, 13824 new_crtc_state->hw.active); 13825 13826 I915_STATE_WARN(active && crtc->pipe != pipe, 13827 "Encoder connected to wrong pipe %c\n", 13828 pipe_name(pipe)); 13829 13830 if (active) 13831 encoder->get_config(encoder, pipe_config); 13832 } 13833 13834 intel_crtc_compute_pixel_rate(pipe_config); 13835 13836 if (!new_crtc_state->hw.active) 13837 return; 13838 13839 intel_pipe_config_sanity_check(dev_priv, pipe_config); 13840 13841 if (!intel_pipe_config_compare(new_crtc_state, 13842 pipe_config, false)) { 13843 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 13844 intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); 13845 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]"); 13846 } 13847 } 13848 13849 static void 
13850 intel_verify_planes(struct intel_atomic_state *state) 13851 { 13852 struct intel_plane *plane; 13853 const struct intel_plane_state *plane_state; 13854 int i; 13855 13856 for_each_new_intel_plane_in_state(state, plane, 13857 plane_state, i) 13858 assert_plane(plane, plane_state->planar_slave || 13859 plane_state->uapi.visible); 13860 } 13861 13862 static void 13863 verify_single_dpll_state(struct drm_i915_private *dev_priv, 13864 struct intel_shared_dpll *pll, 13865 struct intel_crtc *crtc, 13866 struct intel_crtc_state *new_crtc_state) 13867 { 13868 struct intel_dpll_hw_state dpll_hw_state; 13869 unsigned int crtc_mask; 13870 bool active; 13871 13872 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 13873 13874 DRM_DEBUG_KMS("%s\n", pll->info->name); 13875 13876 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state); 13877 13878 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { 13879 I915_STATE_WARN(!pll->on && pll->active_mask, 13880 "pll in active use but not on in sw tracking\n"); 13881 I915_STATE_WARN(pll->on && !pll->active_mask, 13882 "pll is on but not used by any active crtc\n"); 13883 I915_STATE_WARN(pll->on != active, 13884 "pll on state mismatch (expected %i, found %i)\n", 13885 pll->on, active); 13886 } 13887 13888 if (!crtc) { 13889 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, 13890 "more active pll users than references: %x vs %x\n", 13891 pll->active_mask, pll->state.crtc_mask); 13892 13893 return; 13894 } 13895 13896 crtc_mask = drm_crtc_mask(&crtc->base); 13897 13898 if (new_crtc_state->hw.active) 13899 I915_STATE_WARN(!(pll->active_mask & crtc_mask), 13900 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", 13901 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 13902 else 13903 I915_STATE_WARN(pll->active_mask & crtc_mask, 13904 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", 13905 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 13906 13907 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), 13908 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", 13909 crtc_mask, pll->state.crtc_mask); 13910 13911 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 13912 &dpll_hw_state, 13913 sizeof(dpll_hw_state)), 13914 "pll hw state mismatch\n"); 13915 } 13916 13917 static void 13918 verify_shared_dpll_state(struct intel_crtc *crtc, 13919 struct intel_crtc_state *old_crtc_state, 13920 struct intel_crtc_state *new_crtc_state) 13921 { 13922 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13923 13924 if (new_crtc_state->shared_dpll) 13925 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state); 13926 13927 if (old_crtc_state->shared_dpll && 13928 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { 13929 unsigned int crtc_mask = drm_crtc_mask(&crtc->base); 13930 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; 13931 13932 I915_STATE_WARN(pll->active_mask & crtc_mask, 13933 "pll active mismatch (didn't expect pipe %c in active mask)\n", 13934 pipe_name(drm_crtc_index(&crtc->base))); 13935 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, 13936 "pll enabled crtcs mismatch (found %c in enabled mask)\n", 13937 pipe_name(drm_crtc_index(&crtc->base))); 13938 } 13939 } 13940 13941 static void 13942 intel_modeset_verify_crtc(struct intel_crtc *crtc, 13943 struct intel_atomic_state *state, 13944 struct intel_crtc_state *old_crtc_state, 13945 struct intel_crtc_state *new_crtc_state) 13946 { 13947 if (!needs_modeset(new_crtc_state)
&& !new_crtc_state->update_pipe) 13948 return; 13949 13950 verify_wm_state(crtc, new_crtc_state); 13951 verify_connector_state(state, crtc); 13952 verify_crtc_state(crtc, old_crtc_state, new_crtc_state); 13953 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state); 13954 } 13955 13956 static void 13957 verify_disabled_dpll_state(struct drm_i915_private *dev_priv) 13958 { 13959 int i; 13960 13961 for (i = 0; i < dev_priv->num_shared_dpll; i++) 13962 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); 13963 } 13964 13965 static void 13966 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, 13967 struct intel_atomic_state *state) 13968 { 13969 verify_encoder_state(dev_priv, state); 13970 verify_connector_state(state, NULL); 13971 verify_disabled_dpll_state(dev_priv); 13972 } 13973 13974 static void 13975 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) 13976 { 13977 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 13978 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13979 const struct drm_display_mode *adjusted_mode = 13980 &crtc_state->hw.adjusted_mode; 13981 13982 drm_calc_timestamping_constants(&crtc->base, adjusted_mode); 13983 13984 /* 13985 * The scanline counter increments at the leading edge of hsync. 13986 * 13987 * On most platforms it starts counting from vtotal-1 on the 13988 * first active line. That means the scanline counter value is 13989 * always one less than what we would expect. Ie. just after 13990 * start of vblank, which also occurs at start of hsync (on the 13991 * last active line), the scanline counter will read vblank_start-1. 13992 * 13993 * On gen2 the scanline counter starts counting from 1 instead 13994 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 13995 * to keep the value positive), instead of adding one. 13996 * 13997 * On HSW+ the behaviour of the scanline counter depends on the output 13998 * type. For DP ports it behaves like most other platforms, but on HDMI 13999 * there's an extra 1 line difference. So we need to add two instead of 14000 * one to the value. 14001 * 14002 * On VLV/CHV DSI the scanline counter would appear to increment 14003 * approx. 1/3 of a scanline before start of vblank. Unfortunately 14004 * that means we can't tell whether we're in vblank or not while 14005 * we're on that particular line. We must still set scanline_offset 14006 * to 1 so that the vblank timestamps come out correct when we query 14007 * the scanline counter from within the vblank interrupt handler. 14008 * However if queried just before the start of vblank we'll get an 14009 * answer that's slightly in the future. 
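 *
 * To summarize the cases handled below: gen2 uses vtotal - 1 (with
 * vtotal halved on interlaced modes), HDMI on HSW+ DDI platforms uses
 * 2, and everything else uses 1.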
14010 */ 14011 if (IS_GEN(dev_priv, 2)) { 14012 int vtotal; 14013 14014 vtotal = adjusted_mode->crtc_vtotal; 14015 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 14016 vtotal /= 2; 14017 14018 crtc->scanline_offset = vtotal - 1; 14019 } else if (HAS_DDI(dev_priv) && 14020 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 14021 crtc->scanline_offset = 2; 14022 } else { 14023 crtc->scanline_offset = 1; 14024 } 14025 } 14026 14027 static void intel_modeset_clear_plls(struct intel_atomic_state *state) 14028 { 14029 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14030 struct intel_crtc_state *new_crtc_state; 14031 struct intel_crtc *crtc; 14032 int i; 14033 14034 if (!dev_priv->display.crtc_compute_clock) 14035 return; 14036 14037 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14038 if (!needs_modeset(new_crtc_state)) 14039 continue; 14040 14041 intel_release_shared_dplls(state, crtc); 14042 } 14043 } 14044 14045 /* 14046 * This implements the workaround described in the "notes" section of the mode 14047 * set sequence documentation. When going from no pipes or single pipe to 14048 * multiple pipes, and planes are enabled after the pipe, we need to wait at 14049 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 14050 */ 14051 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) 14052 { 14053 struct intel_crtc_state *crtc_state; 14054 struct intel_crtc *crtc; 14055 struct intel_crtc_state *first_crtc_state = NULL; 14056 struct intel_crtc_state *other_crtc_state = NULL; 14057 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 14058 int i; 14059 14060 /* look at all crtc's that are going to be enabled during the modeset */ 14061 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 14062 if (!crtc_state->hw.active || 14063 !needs_modeset(crtc_state)) 14064 continue; 14065 14066 if (first_crtc_state) { 14067 other_crtc_state = crtc_state; 14068 break; 14069 } else { 14070 first_crtc_state = crtc_state; 14071 first_pipe = crtc->pipe; 14072 } 14073 } 14074 14075 /* No workaround needed? */ 14076 if (!first_crtc_state) 14077 return 0; 14078 14079 /* w/a possibly needed, check how many crtc's are already enabled.
*/ 14080 for_each_intel_crtc(state->base.dev, crtc) { 14081 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 14082 if (IS_ERR(crtc_state)) 14083 return PTR_ERR(crtc_state); 14084 14085 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 14086 14087 if (!crtc_state->hw.active || 14088 needs_modeset(crtc_state)) 14089 continue; 14090 14091 /* 2 or more enabled crtcs means no need for w/a */ 14092 if (enabled_pipe != INVALID_PIPE) 14093 return 0; 14094 14095 enabled_pipe = crtc->pipe; 14096 } 14097 14098 if (enabled_pipe != INVALID_PIPE) 14099 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 14100 else if (other_crtc_state) 14101 other_crtc_state->hsw_workaround_pipe = first_pipe; 14102 14103 return 0; 14104 } 14105 14106 static int intel_modeset_checks(struct intel_atomic_state *state) 14107 { 14108 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14109 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14110 struct intel_crtc *crtc; 14111 int ret, i; 14112 14113 /* keep the current setting */ 14114 if (!state->cdclk.force_min_cdclk_changed) 14115 state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk; 14116 14117 state->modeset = true; 14118 state->active_pipes = dev_priv->active_pipes; 14119 state->cdclk.logical = dev_priv->cdclk.logical; 14120 state->cdclk.actual = dev_priv->cdclk.actual; 14121 14122 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14123 new_crtc_state, i) { 14124 if (new_crtc_state->hw.active) 14125 state->active_pipes |= BIT(crtc->pipe); 14126 else 14127 state->active_pipes &= ~BIT(crtc->pipe); 14128 14129 if (old_crtc_state->hw.active != new_crtc_state->hw.active) 14130 state->active_pipe_changes |= BIT(crtc->pipe); 14131 } 14132 14133 if (state->active_pipe_changes) { 14134 ret = intel_atomic_lock_global_state(state); 14135 if (ret) 14136 return ret; 14137 } 14138 14139 ret = intel_modeset_calc_cdclk(state); 14140 if (ret) 14141 return ret; 14142 14143 intel_modeset_clear_plls(state); 14144 14145 if (IS_HASWELL(dev_priv)) 14146 return haswell_mode_set_planes_workaround(state); 14147 14148 return 0; 14149 } 14150 14151 /* 14152 * Handle calculation of various watermark data at the end of the atomic check 14153 * phase. The code here should be run after the per-crtc and per-plane 'check' 14154 * handlers to ensure that all derived state has been updated. 14155 */ 14156 static int calc_watermark_data(struct intel_atomic_state *state) 14157 { 14158 struct drm_device *dev = state->base.dev; 14159 struct drm_i915_private *dev_priv = to_i915(dev); 14160 14161 /* Is there platform-specific watermark information to calculate? */ 14162 if (dev_priv->display.compute_global_watermarks) 14163 return dev_priv->display.compute_global_watermarks(state); 14164 14165 return 0; 14166 } 14167 14168 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 14169 struct intel_crtc_state *new_crtc_state) 14170 { 14171 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 14172 return; 14173 14174 new_crtc_state->uapi.mode_changed = false; 14175 new_crtc_state->update_pipe = true; 14176 14177 /* 14178 * If we're not doing the full modeset we want to 14179 * keep the current M/N values as they may be 14180 * sufficiently different to the computed values 14181 * to cause problems. 
14182 * 14183 * FIXME: should really copy more fuzzy state here 14184 */ 14185 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; 14186 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; 14187 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; 14188 new_crtc_state->has_drrs = old_crtc_state->has_drrs; 14189 } 14190 14191 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 14192 struct intel_crtc *crtc, 14193 u8 plane_ids_mask) 14194 { 14195 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14196 struct intel_plane *plane; 14197 14198 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 14199 struct intel_plane_state *plane_state; 14200 14201 if ((plane_ids_mask & BIT(plane->id)) == 0) 14202 continue; 14203 14204 plane_state = intel_atomic_get_plane_state(state, plane); 14205 if (IS_ERR(plane_state)) 14206 return PTR_ERR(plane_state); 14207 } 14208 14209 return 0; 14210 } 14211 14212 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 14213 { 14214 /* See {hsw,vlv,ivb}_plane_ratio() */ 14215 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 14216 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 14217 IS_IVYBRIDGE(dev_priv); 14218 } 14219 14220 static int intel_atomic_check_planes(struct intel_atomic_state *state, 14221 bool *need_modeset) 14222 { 14223 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14224 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14225 struct intel_plane_state *plane_state; 14226 struct intel_plane *plane; 14227 struct intel_crtc *crtc; 14228 int i, ret; 14229 14230 ret = icl_add_linked_planes(state); 14231 if (ret) 14232 return ret; 14233 14234 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 14235 ret = intel_plane_atomic_check(state, plane); 14236 if (ret) { 14237 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n", 14238 plane->base.base.id, plane->base.name); 14239 return ret; 14240 } 14241 } 14242 14243 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14244 new_crtc_state, i) { 14245 u8 old_active_planes, new_active_planes; 14246 14247 ret = icl_check_nv12_planes(new_crtc_state); 14248 if (ret) 14249 return ret; 14250 14251 /* 14252 * On some platforms the number of active planes affects 14253 * the planes' minimum cdclk calculation. Add such planes 14254 * to the state before we compute the minimum cdclk. 14255 */ 14256 if (!active_planes_affects_min_cdclk(dev_priv)) 14257 continue; 14258 14259 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 14260 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 14261 14262 if (hweight8(old_active_planes) == hweight8(new_active_planes)) 14263 continue; 14264 14265 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 14266 if (ret) 14267 return ret; 14268 } 14269 14270 /* 14271 * active_planes bitmask has been updated, and potentially 14272 * affected planes are part of the state. We can now 14273 * compute the minimum cdclk for each plane. 
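 * A plane's minimum cdclk requirement can therefore push the commit
 * down the modeset path; hence the result of
 * intel_plane_calc_min_cdclk() is OR'ed into *need_modeset below.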
14274 */ 14275 for_each_new_intel_plane_in_state(state, plane, plane_state, i) 14276 *need_modeset |= intel_plane_calc_min_cdclk(state, plane); 14277 14278 return 0; 14279 } 14280 14281 static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 14282 { 14283 struct intel_crtc_state *crtc_state; 14284 struct intel_crtc *crtc; 14285 int i; 14286 14287 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 14288 int ret = intel_crtc_atomic_check(state, crtc); 14289 if (ret) { 14290 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", 14291 crtc->base.base.id, crtc->base.name); 14292 return ret; 14293 } 14294 } 14295 14296 return 0; 14297 } 14298 14299 /** 14300 * intel_atomic_check - validate state object 14301 * @dev: drm device 14302 * @_state: state to validate 14303 */ 14304 static int intel_atomic_check(struct drm_device *dev, 14305 struct drm_atomic_state *_state) 14306 { 14307 struct drm_i915_private *dev_priv = to_i915(dev); 14308 struct intel_atomic_state *state = to_intel_atomic_state(_state); 14309 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14310 struct intel_crtc *crtc; 14311 int ret, i; 14312 bool any_ms = false; 14313 14314 /* Catch I915_MODE_FLAG_INHERITED */ 14315 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14316 new_crtc_state, i) { 14317 if (new_crtc_state->hw.mode.private_flags != 14318 old_crtc_state->hw.mode.private_flags) 14319 new_crtc_state->uapi.mode_changed = true; 14320 } 14321 14322 ret = drm_atomic_helper_check_modeset(dev, &state->base); 14323 if (ret) 14324 goto fail; 14325 14326 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14327 new_crtc_state, i) { 14328 if (!needs_modeset(new_crtc_state)) { 14329 /* Light copy */ 14330 intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state); 14331 14332 continue; 14333 } 14334 14335 if (!new_crtc_state->uapi.enable) { 14336 intel_crtc_copy_uapi_to_hw_state(new_crtc_state); 14337 14338 any_ms = true; 14339 continue; 14340 } 14341 14342 ret = intel_crtc_prepare_cleared_state(new_crtc_state); 14343 if (ret) 14344 goto fail; 14345 14346 ret = intel_modeset_pipe_config(new_crtc_state); 14347 if (ret) 14348 goto fail; 14349 14350 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 14351 14352 if (needs_modeset(new_crtc_state)) 14353 any_ms = true; 14354 } 14355 14356 if (any_ms && !check_digital_port_conflicts(state)) { 14357 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); 14358 ret = -EINVAL; 14359 goto fail; 14360 } 14361 14362 ret = drm_dp_mst_atomic_check(&state->base); 14363 if (ret) 14364 goto fail; 14365 14366 any_ms |= state->cdclk.force_min_cdclk_changed; 14367 14368 ret = intel_atomic_check_planes(state, &any_ms); 14369 if (ret) 14370 goto fail; 14371 14372 if (any_ms) { 14373 ret = intel_modeset_checks(state); 14374 if (ret) 14375 goto fail; 14376 } else { 14377 state->cdclk.logical = dev_priv->cdclk.logical; 14378 } 14379 14380 ret = intel_atomic_check_crtcs(state); 14381 if (ret) 14382 goto fail; 14383 14384 intel_fbc_choose_crtc(dev_priv, state); 14385 ret = calc_watermark_data(state); 14386 if (ret) 14387 goto fail; 14388 14389 ret = intel_bw_atomic_check(state); 14390 if (ret) 14391 goto fail; 14392 14393 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14394 new_crtc_state, i) { 14395 if (!needs_modeset(new_crtc_state) && 14396 !new_crtc_state->update_pipe) 14397 continue; 14398 14399 intel_dump_pipe_config(new_crtc_state, state, 14400 needs_modeset(new_crtc_state) ?
14401 "[modeset]" : "[fastset]"); 14402 } 14403 14404 return 0; 14405 14406 fail: 14407 if (ret == -EDEADLK) 14408 return ret; 14409 14410 /* 14411 * FIXME would probably be nice to know which crtc specifically 14412 * caused the failure, in cases where we can pinpoint it. 14413 */ 14414 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14415 new_crtc_state, i) 14416 intel_dump_pipe_config(new_crtc_state, state, "[failed]"); 14417 14418 return ret; 14419 } 14420 14421 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 14422 { 14423 return drm_atomic_helper_prepare_planes(state->base.dev, 14424 &state->base); 14425 } 14426 14427 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 14428 { 14429 struct drm_device *dev = crtc->base.dev; 14430 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)]; 14431 14432 if (!vblank->max_vblank_count) 14433 return (u32)drm_crtc_accurate_vblank_count(&crtc->base); 14434 14435 return crtc->base.funcs->get_vblank_counter(&crtc->base); 14436 } 14437 14438 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 14439 struct intel_crtc_state *crtc_state) 14440 { 14441 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14442 14443 if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes) 14444 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 14445 14446 if (crtc_state->has_pch_encoder) { 14447 enum pipe pch_transcoder = 14448 intel_crtc_pch_transcoder(crtc); 14449 14450 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 14451 } 14452 } 14453 14454 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 14455 const struct intel_crtc_state *new_crtc_state) 14456 { 14457 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 14458 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14459 14460 /* 14461 * Update pipe size and adjust fitter if needed: the reason for this is 14462 * that in compute_mode_changes we check the native mode (not the pfit 14463 * mode) to see if we can flip rather than do a full mode set. In the 14464 * fastboot case, we'll flip, but if we don't update the pipesrc and 14465 * pfit state, we'll end up with a big fb scanned out into the wrong 14466 * sized surface. 14467 */ 14468 intel_set_pipe_src_size(new_crtc_state); 14469 14470 /* on skylake this is done by detaching scalers */ 14471 if (INTEL_GEN(dev_priv) >= 9) { 14472 skl_detach_scalers(new_crtc_state); 14473 14474 if (new_crtc_state->pch_pfit.enabled) 14475 skylake_pfit_enable(new_crtc_state); 14476 } else if (HAS_PCH_SPLIT(dev_priv)) { 14477 if (new_crtc_state->pch_pfit.enabled) 14478 ironlake_pfit_enable(new_crtc_state); 14479 else if (old_crtc_state->pch_pfit.enabled) 14480 ironlake_pfit_disable(old_crtc_state); 14481 } 14482 14483 if (INTEL_GEN(dev_priv) >= 11) 14484 icl_set_pipe_chicken(crtc); 14485 } 14486 14487 static void commit_pipe_config(struct intel_atomic_state *state, 14488 struct intel_crtc_state *old_crtc_state, 14489 struct intel_crtc_state *new_crtc_state) 14490 { 14491 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 14492 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14493 bool modeset = needs_modeset(new_crtc_state); 14494 14495 /* 14496 * During modesets pipe configuration was programmed as the 14497 * CRTC was enabled. 
14498 */ 14499 if (!modeset) { 14500 if (new_crtc_state->uapi.color_mgmt_changed || 14501 new_crtc_state->update_pipe) 14502 intel_color_commit(new_crtc_state); 14503 14504 if (INTEL_GEN(dev_priv) >= 9) 14505 skl_detach_scalers(new_crtc_state); 14506 14507 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 14508 bdw_set_pipemisc(new_crtc_state); 14509 14510 if (new_crtc_state->update_pipe) 14511 intel_pipe_fastset(old_crtc_state, new_crtc_state); 14512 } 14513 14514 if (dev_priv->display.atomic_update_watermarks) 14515 dev_priv->display.atomic_update_watermarks(state, crtc); 14516 } 14517 14518 static void intel_update_crtc(struct intel_crtc *crtc, 14519 struct intel_atomic_state *state, 14520 struct intel_crtc_state *old_crtc_state, 14521 struct intel_crtc_state *new_crtc_state) 14522 { 14523 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14524 bool modeset = needs_modeset(new_crtc_state); 14525 struct intel_plane_state *new_plane_state = 14526 intel_atomic_get_new_plane_state(state, 14527 to_intel_plane(crtc->base.primary)); 14528 14529 if (modeset) { 14530 intel_crtc_update_active_timings(new_crtc_state); 14531 14532 dev_priv->display.crtc_enable(state, crtc); 14533 14534 /* vblanks work again, re-enable pipe CRC. */ 14535 intel_crtc_enable_pipe_crc(crtc); 14536 } else { 14537 if (new_crtc_state->preload_luts && 14538 (new_crtc_state->uapi.color_mgmt_changed || 14539 new_crtc_state->update_pipe)) 14540 intel_color_load_luts(new_crtc_state); 14541 14542 intel_pre_plane_update(state, crtc); 14543 14544 if (new_crtc_state->update_pipe) 14545 intel_encoders_update_pipe(state, crtc); 14546 } 14547 14548 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) 14549 intel_fbc_disable(crtc); 14550 else if (new_plane_state) 14551 intel_fbc_enable(crtc, new_crtc_state, new_plane_state); 14552 14553 /* Perform vblank evasion around commit operation */ 14554 intel_pipe_update_start(new_crtc_state); 14555 14556 commit_pipe_config(state, old_crtc_state, new_crtc_state); 14557 14558 if (INTEL_GEN(dev_priv) >= 9) 14559 skl_update_planes_on_crtc(state, crtc); 14560 else 14561 i9xx_update_planes_on_crtc(state, crtc); 14562 14563 intel_pipe_update_end(new_crtc_state); 14564 14565 /* 14566 * We usually enable FIFO underrun interrupts as part of the 14567 * CRTC enable sequence during modesets. But when we inherit a 14568 * valid pipe configuration from the BIOS we need to take care 14569 * of enabling them on the CRTC's first fastset. 
14570 */ 14571 if (new_crtc_state->update_pipe && !modeset && 14572 old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) 14573 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 14574 } 14575 14576 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state) 14577 { 14578 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev); 14579 enum transcoder slave_transcoder; 14580 14581 WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask)); 14582 14583 slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1; 14584 return intel_get_crtc_for_pipe(dev_priv, 14585 (enum pipe)slave_transcoder); 14586 } 14587 14588 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 14589 struct intel_crtc_state *old_crtc_state, 14590 struct intel_crtc_state *new_crtc_state, 14591 struct intel_crtc *crtc) 14592 { 14593 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14594 14595 intel_crtc_disable_planes(state, crtc); 14596 14597 /* 14598 * We need to disable pipe CRC before disabling the pipe, 14599 * or we race against vblank off. 14600 */ 14601 intel_crtc_disable_pipe_crc(crtc); 14602 14603 dev_priv->display.crtc_disable(state, crtc); 14604 crtc->active = false; 14605 intel_fbc_disable(crtc); 14606 intel_disable_shared_dpll(old_crtc_state); 14607 14608 /* FIXME unify this for all platforms */ 14609 if (!new_crtc_state->hw.active && 14610 !HAS_GMCH(dev_priv) && 14611 dev_priv->display.initial_watermarks) 14612 dev_priv->display.initial_watermarks(state, crtc); 14613 } 14614 14615 static void intel_commit_modeset_disables(struct intel_atomic_state *state) 14616 { 14617 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 14618 struct intel_crtc *crtc; 14619 u32 handled = 0; 14620 int i; 14621 14622 /* Only disable port sync slaves */ 14623 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14624 new_crtc_state, i) { 14625 if (!needs_modeset(new_crtc_state)) 14626 continue; 14627 14628 if (!old_crtc_state->hw.active) 14629 continue; 14630 14631 /* With Transcoder Port Sync, master and slave CRTCs can be 14632 * assigned in any order, so we need to make sure that the 14633 * slave CRTCs are disabled first and the master CRTC last, 14634 * since slave vblanks are masked until the master's vblanks are enabled.
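 *
 * Rough shape of the two passes below (pseudo-code only; "disable"
 * stands for intel_pre_plane_update() followed by
 * intel_old_crtc_state_disables()):
 *
 *	pass 1: for each active CRTC needing a modeset:
 *		if it is a port sync slave: disable it and record it
 *		    in the "handled" pipe mask;
 *	pass 2: for each active CRTC needing a modeset:
 *		if it is not in the "handled" mask: disable it.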
14635 */ 14636 if (!is_trans_port_sync_slave(old_crtc_state)) 14637 continue; 14638 14639 intel_pre_plane_update(state, crtc); 14640 intel_old_crtc_state_disables(state, old_crtc_state, 14641 new_crtc_state, crtc); 14642 handled |= BIT(crtc->pipe); 14643 } 14644 14645 /* Disable everything else left on */ 14646 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14647 new_crtc_state, i) { 14648 if (!needs_modeset(new_crtc_state) || 14649 (handled & BIT(crtc->pipe))) 14650 continue; 14651 14652 intel_pre_plane_update(state, crtc); 14653 if (old_crtc_state->hw.active) 14654 intel_old_crtc_state_disables(state, old_crtc_state, 14655 new_crtc_state, crtc); 14656 } 14657 } 14658 14659 static void intel_commit_modeset_enables(struct intel_atomic_state *state) 14660 { 14661 struct intel_crtc *crtc; 14662 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14663 int i; 14664 14665 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 14666 if (!new_crtc_state->hw.active) 14667 continue; 14668 14669 intel_update_crtc(crtc, state, old_crtc_state, 14670 new_crtc_state); 14671 } 14672 } 14673 14674 static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc, 14675 struct intel_atomic_state *state, 14676 struct intel_crtc_state *new_crtc_state) 14677 { 14678 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14679 14680 intel_crtc_update_active_timings(new_crtc_state); 14681 dev_priv->display.crtc_enable(state, crtc); 14682 intel_crtc_enable_pipe_crc(crtc); 14683 } 14684 14685 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc, 14686 struct intel_atomic_state *state) 14687 { 14688 struct drm_connector *uninitialized_var(conn); 14689 struct drm_connector_state *conn_state; 14690 struct intel_dp *intel_dp; 14691 int i; 14692 14693 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 14694 if (conn_state->crtc == &crtc->base) 14695 break; 14696 } 14697 intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base); 14698 intel_dp_stop_link_train(intel_dp); 14699 } 14700 14701 static void intel_post_crtc_enable_updates(struct intel_crtc *crtc, 14702 struct intel_atomic_state *state) 14703 { 14704 struct intel_crtc_state *new_crtc_state = 14705 intel_atomic_get_new_crtc_state(state, crtc); 14706 struct intel_crtc_state *old_crtc_state = 14707 intel_atomic_get_old_crtc_state(state, crtc); 14708 struct intel_plane_state *new_plane_state = 14709 intel_atomic_get_new_plane_state(state, 14710 to_intel_plane(crtc->base.primary)); 14711 bool modeset = needs_modeset(new_crtc_state); 14712 14713 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) 14714 intel_fbc_disable(crtc); 14715 else if (new_plane_state) 14716 intel_fbc_enable(crtc, new_crtc_state, new_plane_state); 14717 14718 /* Perform vblank evasion around commit operation */ 14719 intel_pipe_update_start(new_crtc_state); 14720 commit_pipe_config(state, old_crtc_state, new_crtc_state); 14721 skl_update_planes_on_crtc(state, crtc); 14722 intel_pipe_update_end(new_crtc_state); 14723 14724 /* 14725 * We usually enable FIFO underrun interrupts as part of the 14726 * CRTC enable sequence during modesets. But when we inherit a 14727 * valid pipe configuration from the BIOS we need to take care 14728 * of enabling them on the CRTC's first fastset. 
14729 */ 14730 if (new_crtc_state->update_pipe && !modeset && 14731 old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) 14732 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 14733 } 14734 14735 static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc, 14736 struct intel_atomic_state *state, 14737 struct intel_crtc_state *old_crtc_state, 14738 struct intel_crtc_state *new_crtc_state) 14739 { 14740 struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state); 14741 struct intel_crtc_state *new_slave_crtc_state = 14742 intel_atomic_get_new_crtc_state(state, slave_crtc); 14743 struct intel_crtc_state *old_slave_crtc_state = 14744 intel_atomic_get_old_crtc_state(state, slave_crtc); 14745 14746 WARN_ON(!slave_crtc || !new_slave_crtc_state || 14747 !old_slave_crtc_state); 14748 14749 DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n", 14750 crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id, 14751 slave_crtc->base.name); 14752 14753 /* Enable seq for slave with DP_TP_CTL left Idle until the 14754 * master is ready 14755 */ 14756 intel_crtc_enable_trans_port_sync(slave_crtc, 14757 state, 14758 new_slave_crtc_state); 14759 14760 /* Enable seq for master with DP_TP_CTL left Idle */ 14761 intel_crtc_enable_trans_port_sync(crtc, 14762 state, 14763 new_crtc_state); 14764 14765 /* Set Slave's DP_TP_CTL to Normal */ 14766 intel_set_dp_tp_ctl_normal(slave_crtc, 14767 state); 14768 14769 /* Set Master's DP_TP_CTL To Normal */ 14770 usleep_range(200, 400); 14771 intel_set_dp_tp_ctl_normal(crtc, 14772 state); 14773 14774 /* Now do the post crtc enable for the master and all slaves */ 14775 intel_post_crtc_enable_updates(slave_crtc, 14776 state); 14777 intel_post_crtc_enable_updates(crtc, 14778 state); 14779 } 14780 14781 static void skl_commit_modeset_enables(struct intel_atomic_state *state) 14782 { 14783 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14784 struct intel_crtc *crtc; 14785 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14786 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 14787 u8 required_slices = state->wm_results.ddb.enabled_slices; 14788 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 14789 u8 dirty_pipes = 0; 14790 int i; 14791 14792 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 14793 /* ignore allocations for crtcs that have been turned off. */ 14794 if (!needs_modeset(new_crtc_state) && new_crtc_state->hw.active) 14795 entries[i] = old_crtc_state->wm.skl.ddb; 14796 if (new_crtc_state->hw.active) 14797 dirty_pipes |= BIT(crtc->pipe); 14798 } 14799 14800 /* If 2nd DBuf slice required, enable it here */ 14801 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) 14802 icl_dbuf_slices_update(dev_priv, required_slices); 14803 14804 /* 14805 * Whenever the number of active pipes changes, we need to make sure we 14806 * update the pipes in the right order so that their ddb allocations 14807 * never overlap with each other in between CRTC updates. Otherwise we'll 14808 * cause pipe underruns and other bad stuff.
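 *
 * Rough shape of the retry loop below (pseudo-code, assuming the
 * overlap semantics of skl_ddb_allocation_overlaps() as used here):
 *
 *	while (dirty_pipes) {
 *		for each active pipe still marked dirty:
 *			if its new DDB range overlaps an entry still
 *			    recorded in entries[], skip it this pass;
 *			else commit the pipe, store its new DDB in
 *			    entries[], clear its dirty bit, and wait
 *			    for a vblank if its DDB changed without a
 *			    modeset while other pipes remain dirty;
 *	}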
14809 */ 14810 while (dirty_pipes) { 14811 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14812 new_crtc_state, i) { 14813 enum pipe pipe = crtc->pipe; 14814 bool modeset = needs_modeset(new_crtc_state); 14815 14816 if ((dirty_pipes & BIT(pipe)) == 0) 14817 continue; 14818 14819 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 14820 entries, 14821 INTEL_NUM_PIPES(dev_priv), i)) 14822 continue; 14823 14824 entries[i] = new_crtc_state->wm.skl.ddb; 14825 dirty_pipes &= ~BIT(pipe); 14826 14827 if (modeset && is_trans_port_sync_mode(new_crtc_state)) { 14828 if (is_trans_port_sync_master(new_crtc_state)) 14829 intel_update_trans_port_sync_crtcs(crtc, 14830 state, 14831 old_crtc_state, 14832 new_crtc_state); 14833 else 14834 continue; 14835 } else { 14836 intel_update_crtc(crtc, state, old_crtc_state, 14837 new_crtc_state); 14838 } 14839 14840 /* 14841 * If this is an already active pipe, its DDB has changed, 14842 * and this isn't the last pipe that needs updating 14843 * then we need to wait for a vblank to pass for the 14844 * new ddb allocation to take effect. 14845 */ 14846 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 14847 &old_crtc_state->wm.skl.ddb) && 14848 !modeset && dirty_pipes) 14849 intel_wait_for_vblank(dev_priv, pipe); 14850 } 14851 } 14852 14853 /* If the 2nd DBuf slice is no longer required, disable it */ 14854 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices) 14855 icl_dbuf_slices_update(dev_priv, required_slices); 14856 } 14857 14858 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 14859 { 14860 struct intel_atomic_state *state, *next; 14861 struct llist_node *freed; 14862 14863 freed = llist_del_all(&dev_priv->atomic_helper.free_list); 14864 llist_for_each_entry_safe(state, next, freed, freed) 14865 drm_atomic_state_put(&state->base); 14866 } 14867 14868 static void intel_atomic_helper_free_state_worker(struct work_struct *work) 14869 { 14870 struct drm_i915_private *dev_priv = 14871 container_of(work, typeof(*dev_priv), atomic_helper.free_work); 14872 14873 intel_atomic_helper_free_state(dev_priv); 14874 } 14875 14876 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 14877 { 14878 struct wait_queue_entry wait_fence, wait_reset; 14879 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); 14880 14881 init_wait_entry(&wait_fence, 0); 14882 init_wait_entry(&wait_reset, 0); 14883 for (;;) { 14884 prepare_to_wait(&intel_state->commit_ready.wait, 14885 &wait_fence, TASK_UNINTERRUPTIBLE); 14886 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 14887 I915_RESET_MODESET), 14888 &wait_reset, TASK_UNINTERRUPTIBLE); 14889 14890 14891 if (i915_sw_fence_done(&intel_state->commit_ready) || 14892 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 14893 break; 14894 14895 schedule(); 14896 } 14897 finish_wait(&intel_state->commit_ready.wait, &wait_fence); 14898 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 14899 I915_RESET_MODESET), 14900 &wait_reset); 14901 } 14902 14903 static void intel_atomic_cleanup_work(struct work_struct *work) 14904 { 14905 struct drm_atomic_state *state = 14906 container_of(work, struct drm_atomic_state, commit_work); 14907 struct drm_i915_private *i915 = to_i915(state->dev); 14908 14909 drm_atomic_helper_cleanup_planes(&i915->drm, state); 14910 drm_atomic_helper_commit_cleanup_done(state); 14911 drm_atomic_state_put(state); 14912 14913 intel_atomic_helper_free_state(i915); 14914 } 14915 14916 static void
intel_atomic_commit_tail(struct intel_atomic_state *state) 14917 { 14918 struct drm_device *dev = state->base.dev; 14919 struct drm_i915_private *dev_priv = to_i915(dev); 14920 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 14921 struct intel_crtc *crtc; 14922 u64 put_domains[I915_MAX_PIPES] = {}; 14923 intel_wakeref_t wakeref = 0; 14924 int i; 14925 14926 intel_atomic_commit_fence_wait(state); 14927 14928 drm_atomic_helper_wait_for_dependencies(&state->base); 14929 14930 if (state->modeset) 14931 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 14932 14933 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14934 new_crtc_state, i) { 14935 if (needs_modeset(new_crtc_state) || 14936 new_crtc_state->update_pipe) { 14937 14938 put_domains[crtc->pipe] = 14939 modeset_get_crtc_power_domains(new_crtc_state); 14940 } 14941 } 14942 14943 intel_commit_modeset_disables(state); 14944 14945 /* FIXME: Eventually get rid of our crtc->config pointer */ 14946 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 14947 crtc->config = new_crtc_state; 14948 14949 if (state->modeset) { 14950 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); 14951 14952 intel_set_cdclk_pre_plane_update(dev_priv, 14953 &state->cdclk.actual, 14954 &dev_priv->cdclk.actual, 14955 state->cdclk.pipe); 14956 14957 /* 14958 * SKL workaround: bspec recommends we disable the SAGV when we 14959 * have more than one pipe enabled 14960 */ 14961 if (!intel_can_enable_sagv(state)) 14962 intel_disable_sagv(dev_priv); 14963 14964 intel_modeset_verify_disabled(dev_priv, state); 14965 } 14966 14967 /* Complete the events for pipes that have now been disabled */ 14968 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14969 bool modeset = needs_modeset(new_crtc_state); 14970 14971 /* Complete events for now disabled pipes here. */ 14972 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) { 14973 spin_lock_irq(&dev->event_lock); 14974 drm_crtc_send_vblank_event(&crtc->base, 14975 new_crtc_state->uapi.event); 14976 spin_unlock_irq(&dev->event_lock); 14977 14978 new_crtc_state->uapi.event = NULL; 14979 } 14980 } 14981 14982 if (state->modeset) 14983 intel_encoders_update_prepare(state); 14984 14985 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 14986 dev_priv->display.commit_modeset_enables(state); 14987 14988 if (state->modeset) { 14989 intel_encoders_update_complete(state); 14990 14991 intel_set_cdclk_post_plane_update(dev_priv, 14992 &state->cdclk.actual, 14993 &dev_priv->cdclk.actual, 14994 state->cdclk.pipe); 14995 } 14996 14997 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 14998 * already, but still need the state for the delayed optimization. To 14999 * fix this: 15000 * - wrap the optimization/post_plane_update stuff into a per-crtc work. 15001 * - schedule that vblank worker _before_ calling hw_done 15002 * - at the start of commit_tail, cancel it _synchronously_ 15003 * - switch over to the vblank wait helper in the core after that since 15004 * we don't need our special handling any more.
15005 */ 15006 drm_atomic_helper_wait_for_flip_done(dev, &state->base); 15007 15008 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 15009 if (new_crtc_state->hw.active && 15010 !needs_modeset(new_crtc_state) && 15011 !new_crtc_state->preload_luts && 15012 (new_crtc_state->uapi.color_mgmt_changed || 15013 new_crtc_state->update_pipe)) 15014 intel_color_load_luts(new_crtc_state); 15015 } 15016 15017 /* 15018 * Now that the vblank has passed, we can go ahead and program the 15019 * optimal watermarks on platforms that need two-step watermark 15020 * programming. 15021 * 15022 * TODO: Move this (and other cleanup) to an async worker eventually. 15023 */ 15024 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15025 new_crtc_state, i) { 15026 /* 15027 * Gen2 reports pipe underruns whenever all planes are disabled. 15028 * So re-enable underrun reporting after some planes get enabled. 15029 * 15030 * We do this before .optimize_watermarks() so that we have a 15031 * chance of catching underruns with the intermediate watermarks 15032 * vs. the new plane configuration. 15033 */ 15034 if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state)) 15035 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 15036 15037 if (dev_priv->display.optimize_watermarks) 15038 dev_priv->display.optimize_watermarks(state, crtc); 15039 } 15040 15041 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 15042 intel_post_plane_update(state, crtc); 15043 15044 if (put_domains[i]) 15045 modeset_put_power_domains(dev_priv, put_domains[i]); 15046 15047 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); 15048 } 15049 15050 /* Underruns don't always raise interrupts, so check manually */ 15051 intel_check_cpu_fifo_underruns(dev_priv); 15052 intel_check_pch_fifo_underruns(dev_priv); 15053 15054 if (state->modeset) 15055 intel_verify_planes(state); 15056 15057 if (state->modeset && intel_can_enable_sagv(state)) 15058 intel_enable_sagv(dev_priv); 15059 15060 drm_atomic_helper_commit_hw_done(&state->base); 15061 15062 if (state->modeset) { 15063 /* As one of the primary mmio accessors, KMS has a high 15064 * likelihood of triggering bugs in unclaimed access. After we 15065 * finish modesetting, see if an error has been flagged, and if 15066 * so enable debugging for the next modeset - and hope we catch 15067 * the culprit. 15068 */ 15069 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 15070 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); 15071 } 15072 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 15073 15074 /* 15075 * Defer the cleanup of the old state to a separate worker to not 15076 * impede the current task (userspace for blocking modesets) that 15077 * is executed inline. For out-of-line asynchronous modesets/flips, 15078 * deferring to a new worker seems overkill, but we would place a 15079 * schedule point (cond_resched()) here anyway to keep latencies 15080 * down.
15081 */ 15082 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 15083 queue_work(system_highpri_wq, &state->base.commit_work); 15084 } 15085 15086 static void intel_atomic_commit_work(struct work_struct *work) 15087 { 15088 struct intel_atomic_state *state = 15089 container_of(work, struct intel_atomic_state, base.commit_work); 15090 15091 intel_atomic_commit_tail(state); 15092 } 15093 15094 static int __i915_sw_fence_call 15095 intel_atomic_commit_ready(struct i915_sw_fence *fence, 15096 enum i915_sw_fence_notify notify) 15097 { 15098 struct intel_atomic_state *state = 15099 container_of(fence, struct intel_atomic_state, commit_ready); 15100 15101 switch (notify) { 15102 case FENCE_COMPLETE: 15103 /* we do blocking waits in the worker, nothing to do here */ 15104 break; 15105 case FENCE_FREE: 15106 { 15107 struct intel_atomic_helper *helper = 15108 &to_i915(state->base.dev)->atomic_helper; 15109 15110 if (llist_add(&state->freed, &helper->free_list)) 15111 schedule_work(&helper->free_work); 15112 break; 15113 } 15114 } 15115 15116 return NOTIFY_DONE; 15117 } 15118 15119 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 15120 { 15121 struct intel_plane_state *old_plane_state, *new_plane_state; 15122 struct intel_plane *plane; 15123 int i; 15124 15125 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 15126 new_plane_state, i) 15127 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 15128 to_intel_frontbuffer(new_plane_state->hw.fb), 15129 plane->frontbuffer_bit); 15130 } 15131 15132 static void assert_global_state_locked(struct drm_i915_private *dev_priv) 15133 { 15134 struct intel_crtc *crtc; 15135 15136 for_each_intel_crtc(&dev_priv->drm, crtc) 15137 drm_modeset_lock_assert_held(&crtc->base.mutex); 15138 } 15139 15140 static int intel_atomic_commit(struct drm_device *dev, 15141 struct drm_atomic_state *_state, 15142 bool nonblock) 15143 { 15144 struct intel_atomic_state *state = to_intel_atomic_state(_state); 15145 struct drm_i915_private *dev_priv = to_i915(dev); 15146 int ret = 0; 15147 15148 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 15149 15150 drm_atomic_state_get(&state->base); 15151 i915_sw_fence_init(&state->commit_ready, 15152 intel_atomic_commit_ready); 15153 15154 /* 15155 * The intel_legacy_cursor_update() fast path takes care 15156 * of avoiding the vblank waits for simple cursor 15157 * movement and flips. For cursor on/off and size changes, 15158 * we want to perform the vblank waits so that watermark 15159 * updates happen during the correct frames. Gen9+ have 15160 * double buffered watermarks and so shouldn't need this. 15161 * 15162 * Unset state->legacy_cursor_update before the call to 15163 * drm_atomic_helper_setup_commit() because otherwise 15164 * drm_atomic_helper_wait_for_flip_done() is a noop and 15165 * we get FIFO underruns because we didn't wait 15166 * for vblank. 15167 * 15168 * FIXME doing watermarks and fb cleanup from a vblank worker 15169 * (assuming we had any) would solve these problems. 
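 *
 * Condensed sketch of the check made below (illustrative pseudo-code,
 * not additional logic):
 *
 *	if (gen < 9 && state->base.legacy_cursor_update)
 *		for each new crtc state:
 *			if (wm.need_postvbl_update || update_wm_post)
 *				legacy_cursor_update = false; // force the flip-done wait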
15170 */ 15171 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) { 15172 struct intel_crtc_state *new_crtc_state; 15173 struct intel_crtc *crtc; 15174 int i; 15175 15176 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 15177 if (new_crtc_state->wm.need_postvbl_update || 15178 new_crtc_state->update_wm_post) 15179 state->base.legacy_cursor_update = false; 15180 } 15181 15182 ret = intel_atomic_prepare_commit(state); 15183 if (ret) { 15184 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); 15185 i915_sw_fence_commit(&state->commit_ready); 15186 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 15187 return ret; 15188 } 15189 15190 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 15191 if (!ret) 15192 ret = drm_atomic_helper_swap_state(&state->base, true); 15193 15194 if (ret) { 15195 i915_sw_fence_commit(&state->commit_ready); 15196 15197 drm_atomic_helper_cleanup_planes(dev, &state->base); 15198 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 15199 return ret; 15200 } 15201 dev_priv->wm.distrust_bios_wm = false; 15202 intel_shared_dpll_swap_state(state); 15203 intel_atomic_track_fbs(state); 15204 15205 if (state->global_state_changed) { 15206 assert_global_state_locked(dev_priv); 15207 15208 memcpy(dev_priv->min_cdclk, state->min_cdclk, 15209 sizeof(state->min_cdclk)); 15210 memcpy(dev_priv->min_voltage_level, state->min_voltage_level, 15211 sizeof(state->min_voltage_level)); 15212 dev_priv->active_pipes = state->active_pipes; 15213 dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; 15214 15215 intel_cdclk_swap_state(state); 15216 } 15217 15218 drm_atomic_state_get(&state->base); 15219 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 15220 15221 i915_sw_fence_commit(&state->commit_ready); 15222 if (nonblock && state->modeset) { 15223 queue_work(dev_priv->modeset_wq, &state->base.commit_work); 15224 } else if (nonblock) { 15225 queue_work(dev_priv->flip_wq, &state->base.commit_work); 15226 } else { 15227 if (state->modeset) 15228 flush_workqueue(dev_priv->modeset_wq); 15229 intel_atomic_commit_tail(state); 15230 } 15231 15232 return 0; 15233 } 15234 15235 struct wait_rps_boost { 15236 struct wait_queue_entry wait; 15237 15238 struct drm_crtc *crtc; 15239 struct i915_request *request; 15240 }; 15241 15242 static int do_rps_boost(struct wait_queue_entry *_wait, 15243 unsigned mode, int sync, void *key) 15244 { 15245 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 15246 struct i915_request *rq = wait->request; 15247 15248 /* 15249 * If we missed the vblank, but the request is already running it 15250 * is reasonable to assume that it will complete before the next 15251 * vblank without our intervention, so leave RPS alone. 
15252 */ 15253 if (!i915_request_started(rq)) 15254 intel_rps_boost(rq); 15255 i915_request_put(rq); 15256 15257 drm_crtc_vblank_put(wait->crtc); 15258 15259 list_del(&wait->wait.entry); 15260 kfree(wait); 15261 return 1; 15262 } 15263 15264 static void add_rps_boost_after_vblank(struct drm_crtc *crtc, 15265 struct dma_fence *fence) 15266 { 15267 struct wait_rps_boost *wait; 15268 15269 if (!dma_fence_is_i915(fence)) 15270 return; 15271 15272 if (INTEL_GEN(to_i915(crtc->dev)) < 6) 15273 return; 15274 15275 if (drm_crtc_vblank_get(crtc)) 15276 return; 15277 15278 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 15279 if (!wait) { 15280 drm_crtc_vblank_put(crtc); 15281 return; 15282 } 15283 15284 wait->request = to_request(dma_fence_get(fence)); 15285 wait->crtc = crtc; 15286 15287 wait->wait.func = do_rps_boost; 15288 wait->wait.flags = 0; 15289 15290 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 15291 } 15292 15293 static int intel_plane_pin_fb(struct intel_plane_state *plane_state) 15294 { 15295 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 15296 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 15297 struct drm_framebuffer *fb = plane_state->hw.fb; 15298 struct i915_vma *vma; 15299 15300 if (plane->id == PLANE_CURSOR && 15301 INTEL_INFO(dev_priv)->display.cursor_needs_physical) { 15302 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15303 const int align = intel_cursor_alignment(dev_priv); 15304 int err; 15305 15306 err = i915_gem_object_attach_phys(obj, align); 15307 if (err) 15308 return err; 15309 } 15310 15311 vma = intel_pin_and_fence_fb_obj(fb, 15312 &plane_state->view, 15313 intel_plane_uses_fence(plane_state), 15314 &plane_state->flags); 15315 if (IS_ERR(vma)) 15316 return PTR_ERR(vma); 15317 15318 plane_state->vma = vma; 15319 15320 return 0; 15321 } 15322 15323 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 15324 { 15325 struct i915_vma *vma; 15326 15327 vma = fetch_and_zero(&old_plane_state->vma); 15328 if (vma) 15329 intel_unpin_fb_vma(vma, old_plane_state->flags); 15330 } 15331 15332 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) 15333 { 15334 struct i915_sched_attr attr = { 15335 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY), 15336 }; 15337 15338 i915_gem_object_wait_priority(obj, 0, &attr); 15339 } 15340 15341 /** 15342 * intel_prepare_plane_fb - Prepare fb for usage on plane 15343 * @plane: drm plane to prepare for 15344 * @_new_plane_state: the plane state being prepared 15345 * 15346 * Prepares a framebuffer for usage on a display plane. Generally this 15347 * involves pinning the underlying object and updating the frontbuffer tracking 15348 * bits. Some older platforms need special physical address handling for 15349 * cursor planes. 15350 * 15351 * Returns 0 on success, negative error code on failure. 
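 *
 * Note: this is the &drm_plane_helper_funcs.prepare_fb hook. Together
 * with intel_cleanup_plane_fb() it is attached to every plane through
 * the drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs)
 * calls in the plane-creation functions later in this file.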
15352 */ 15353 int 15354 intel_prepare_plane_fb(struct drm_plane *plane, 15355 struct drm_plane_state *_new_plane_state) 15356 { 15357 struct intel_plane_state *new_plane_state = 15358 to_intel_plane_state(_new_plane_state); 15359 struct intel_atomic_state *intel_state = 15360 to_intel_atomic_state(new_plane_state->uapi.state); 15361 struct drm_i915_private *dev_priv = to_i915(plane->dev); 15362 struct drm_framebuffer *fb = new_plane_state->hw.fb; 15363 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15364 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); 15365 int ret; 15366 15367 if (old_obj) { 15368 struct intel_crtc_state *crtc_state = 15369 intel_atomic_get_new_crtc_state(intel_state, 15370 to_intel_crtc(plane->state->crtc)); 15371 15372 /* Big Hammer, we also need to ensure that any pending 15373 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 15374 * current scanout is retired before unpinning the old 15375 * framebuffer. Note that we rely on userspace rendering 15376 * into the buffer attached to the pipe they are waiting 15377 * on. If not, userspace generates a GPU hang with IPEHR 15378 * pointing to the MI_WAIT_FOR_EVENT. 15379 * 15380 * This should only fail upon a hung GPU, in which case we 15381 * can safely continue. 15382 */ 15383 if (needs_modeset(crtc_state)) { 15384 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 15385 old_obj->base.resv, NULL, 15386 false, 0, 15387 GFP_KERNEL); 15388 if (ret < 0) 15389 return ret; 15390 } 15391 } 15392 15393 if (new_plane_state->uapi.fence) { /* explicit fencing */ 15394 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, 15395 new_plane_state->uapi.fence, 15396 I915_FENCE_TIMEOUT, 15397 GFP_KERNEL); 15398 if (ret < 0) 15399 return ret; 15400 } 15401 15402 if (!obj) 15403 return 0; 15404 15405 ret = i915_gem_object_pin_pages(obj); 15406 if (ret) 15407 return ret; 15408 15409 ret = intel_plane_pin_fb(new_plane_state); 15410 15411 i915_gem_object_unpin_pages(obj); 15412 if (ret) 15413 return ret; 15414 15415 fb_obj_bump_render_priority(obj); 15416 i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB); 15417 15418 if (!new_plane_state->uapi.fence) { /* implicit fencing */ 15419 struct dma_fence *fence; 15420 15421 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 15422 obj->base.resv, NULL, 15423 false, I915_FENCE_TIMEOUT, 15424 GFP_KERNEL); 15425 if (ret < 0) 15426 return ret; 15427 15428 fence = dma_resv_get_excl_rcu(obj->base.resv); 15429 if (fence) { 15430 add_rps_boost_after_vblank(new_plane_state->hw.crtc, 15431 fence); 15432 dma_fence_put(fence); 15433 } 15434 } else { 15435 add_rps_boost_after_vblank(new_plane_state->hw.crtc, 15436 new_plane_state->uapi.fence); 15437 } 15438 15439 /* 15440 * We declare pageflips to be interactive and so merit a small bias 15441 * towards upclocking to deliver the frame on time. By only changing 15442 * the RPS thresholds to sample more regularly and aim for higher 15443 * clocks we can hopefully deliver low power workloads (like kodi) 15444 * that are not quite steady state without resorting to forcing 15445 * maximum clocks following a vblank miss (see do_rps_boost()).
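 *
 * Note: the interactive bias taken below is balanced in
 * intel_cleanup_plane_fb(), which calls
 * intel_rps_mark_interactive(&dev_priv->gt.rps, false) once the old
 * plane state is torn down, so the bias only spans the commit.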
15446 */ 15447 if (!intel_state->rps_interactive) { 15448 intel_rps_mark_interactive(&dev_priv->gt.rps, true); 15449 intel_state->rps_interactive = true; 15450 } 15451 15452 return 0; 15453 } 15454 15455 /** 15456 * intel_cleanup_plane_fb - Cleans up an fb after plane use 15457 * @plane: drm plane to clean up for 15458 * @_old_plane_state: the state from the previous modeset 15459 * 15460 * Cleans up a framebuffer that has just been removed from a plane. 15461 */ 15462 void 15463 intel_cleanup_plane_fb(struct drm_plane *plane, 15464 struct drm_plane_state *_old_plane_state) 15465 { 15466 struct intel_plane_state *old_plane_state = 15467 to_intel_plane_state(_old_plane_state); 15468 struct intel_atomic_state *intel_state = 15469 to_intel_atomic_state(old_plane_state->uapi.state); 15470 struct drm_i915_private *dev_priv = to_i915(plane->dev); 15471 15472 if (intel_state->rps_interactive) { 15473 intel_rps_mark_interactive(&dev_priv->gt.rps, false); 15474 intel_state->rps_interactive = false; 15475 } 15476 15477 /* Should only be called after a successful intel_prepare_plane_fb()! */ 15478 intel_plane_unpin_fb(old_plane_state); 15479 } 15480 15481 /** 15482 * intel_plane_destroy - destroy a plane 15483 * @plane: plane to destroy 15484 * 15485 * Common destruction function for all types of planes (primary, cursor, 15486 * sprite). 15487 */ 15488 void intel_plane_destroy(struct drm_plane *plane) 15489 { 15490 drm_plane_cleanup(plane); 15491 kfree(to_intel_plane(plane)); 15492 } 15493 15494 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, 15495 u32 format, u64 modifier) 15496 { 15497 switch (modifier) { 15498 case DRM_FORMAT_MOD_LINEAR: 15499 case I915_FORMAT_MOD_X_TILED: 15500 break; 15501 default: 15502 return false; 15503 } 15504 15505 switch (format) { 15506 case DRM_FORMAT_C8: 15507 case DRM_FORMAT_RGB565: 15508 case DRM_FORMAT_XRGB1555: 15509 case DRM_FORMAT_XRGB8888: 15510 return modifier == DRM_FORMAT_MOD_LINEAR || 15511 modifier == I915_FORMAT_MOD_X_TILED; 15512 default: 15513 return false; 15514 } 15515 } 15516 15517 static bool i965_plane_format_mod_supported(struct drm_plane *_plane, 15518 u32 format, u64 modifier) 15519 { 15520 switch (modifier) { 15521 case DRM_FORMAT_MOD_LINEAR: 15522 case I915_FORMAT_MOD_X_TILED: 15523 break; 15524 default: 15525 return false; 15526 } 15527 15528 switch (format) { 15529 case DRM_FORMAT_C8: 15530 case DRM_FORMAT_RGB565: 15531 case DRM_FORMAT_XRGB8888: 15532 case DRM_FORMAT_XBGR8888: 15533 case DRM_FORMAT_ARGB8888: 15534 case DRM_FORMAT_ABGR8888: 15535 case DRM_FORMAT_XRGB2101010: 15536 case DRM_FORMAT_XBGR2101010: 15537 case DRM_FORMAT_ARGB2101010: 15538 case DRM_FORMAT_ABGR2101010: 15539 case DRM_FORMAT_XBGR16161616F: 15540 return modifier == DRM_FORMAT_MOD_LINEAR || 15541 modifier == I915_FORMAT_MOD_X_TILED; 15542 default: 15543 return false; 15544 } 15545 } 15546 15547 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, 15548 u32 format, u64 modifier) 15549 { 15550 return modifier == DRM_FORMAT_MOD_LINEAR && 15551 format == DRM_FORMAT_ARGB8888; 15552 } 15553 15554 static const struct drm_plane_funcs i965_plane_funcs = { 15555 .update_plane = drm_atomic_helper_update_plane, 15556 .disable_plane = drm_atomic_helper_disable_plane, 15557 .destroy = intel_plane_destroy, 15558 .atomic_duplicate_state = intel_plane_duplicate_state, 15559 .atomic_destroy_state = intel_plane_destroy_state, 15560 .format_mod_supported = i965_plane_format_mod_supported, 15561 }; 15562 15563 static const struct drm_plane_funcs 
i8xx_plane_funcs = { 15564 .update_plane = drm_atomic_helper_update_plane, 15565 .disable_plane = drm_atomic_helper_disable_plane, 15566 .destroy = intel_plane_destroy, 15567 .atomic_duplicate_state = intel_plane_duplicate_state, 15568 .atomic_destroy_state = intel_plane_destroy_state, 15569 .format_mod_supported = i8xx_plane_format_mod_supported, 15570 }; 15571 15572 static int 15573 intel_legacy_cursor_update(struct drm_plane *_plane, 15574 struct drm_crtc *_crtc, 15575 struct drm_framebuffer *fb, 15576 int crtc_x, int crtc_y, 15577 unsigned int crtc_w, unsigned int crtc_h, 15578 u32 src_x, u32 src_y, 15579 u32 src_w, u32 src_h, 15580 struct drm_modeset_acquire_ctx *ctx) 15581 { 15582 struct intel_plane *plane = to_intel_plane(_plane); 15583 struct intel_crtc *crtc = to_intel_crtc(_crtc); 15584 struct intel_plane_state *old_plane_state = 15585 to_intel_plane_state(plane->base.state); 15586 struct intel_plane_state *new_plane_state; 15587 struct intel_crtc_state *crtc_state = 15588 to_intel_crtc_state(crtc->base.state); 15589 struct intel_crtc_state *new_crtc_state; 15590 int ret; 15591 15592 /* 15593 * When crtc is inactive or there is a modeset pending, 15594 * wait for it to complete in the slowpath 15595 */ 15596 if (!crtc_state->hw.active || needs_modeset(crtc_state) || 15597 crtc_state->update_pipe) 15598 goto slow; 15599 15600 /* 15601 * Don't do an async update if there is an outstanding commit modifying 15602 * the plane. This prevents our async update's changes from getting 15603 * overridden by a previous synchronous update's state. 15604 */ 15605 if (old_plane_state->uapi.commit && 15606 !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done)) 15607 goto slow; 15608 15609 /* 15610 * If any parameters change that may affect watermarks, 15611 * take the slowpath. Only changing fb or position should be 15612 * in the fastpath. 
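 *
 * Concretely, for the comparison below (a summary, not extra checks):
 *
 *	fb swap, crtc_x/crtc_y or src_x/src_y move  -> fastpath is fine
 *	src or crtc size change, plane moved to a
 *	    different crtc, fb appearing/vanishing  -> goto slow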
15613 */ 15614 if (old_plane_state->uapi.crtc != &crtc->base || 15615 old_plane_state->uapi.src_w != src_w || 15616 old_plane_state->uapi.src_h != src_h || 15617 old_plane_state->uapi.crtc_w != crtc_w || 15618 old_plane_state->uapi.crtc_h != crtc_h || 15619 !old_plane_state->uapi.fb != !fb) 15620 goto slow; 15621 15622 new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base)); 15623 if (!new_plane_state) 15624 return -ENOMEM; 15625 15626 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base)); 15627 if (!new_crtc_state) { 15628 ret = -ENOMEM; 15629 goto out_free; 15630 } 15631 15632 drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb); 15633 15634 new_plane_state->uapi.src_x = src_x; 15635 new_plane_state->uapi.src_y = src_y; 15636 new_plane_state->uapi.src_w = src_w; 15637 new_plane_state->uapi.src_h = src_h; 15638 new_plane_state->uapi.crtc_x = crtc_x; 15639 new_plane_state->uapi.crtc_y = crtc_y; 15640 new_plane_state->uapi.crtc_w = crtc_w; 15641 new_plane_state->uapi.crtc_h = crtc_h; 15642 15643 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, 15644 old_plane_state, new_plane_state); 15645 if (ret) 15646 goto out_free; 15647 15648 ret = intel_plane_pin_fb(new_plane_state); 15649 if (ret) 15650 goto out_free; 15651 15652 intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb), 15653 ORIGIN_FLIP); 15654 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 15655 to_intel_frontbuffer(new_plane_state->hw.fb), 15656 plane->frontbuffer_bit); 15657 15658 /* Swap plane state */ 15659 plane->base.state = &new_plane_state->uapi; 15660 15661 /* 15662 * We cannot swap crtc_state as it may be in use by an atomic commit or 15663 * page flip that's running simultaneously. If we swap crtc_state and 15664 * destroy the old state, we will cause a use-after-free there. 15665 * 15666 * Only update active_planes, which is needed for our internal 15667 * bookkeeping. Either value will do the right thing when updating 15668 * planes atomically. If the cursor was part of the atomic update then 15669 * we would have taken the slowpath. 
15670 */ 15671 crtc_state->active_planes = new_crtc_state->active_planes; 15672 15673 if (new_plane_state->uapi.visible) 15674 intel_update_plane(plane, crtc_state, new_plane_state); 15675 else 15676 intel_disable_plane(plane, crtc_state); 15677 15678 intel_plane_unpin_fb(old_plane_state); 15679 15680 out_free: 15681 if (new_crtc_state) 15682 intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi); 15683 if (ret) 15684 intel_plane_destroy_state(&plane->base, &new_plane_state->uapi); 15685 else 15686 intel_plane_destroy_state(&plane->base, &old_plane_state->uapi); 15687 return ret; 15688 15689 slow: 15690 return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb, 15691 crtc_x, crtc_y, crtc_w, crtc_h, 15692 src_x, src_y, src_w, src_h, ctx); 15693 } 15694 15695 static const struct drm_plane_funcs intel_cursor_plane_funcs = { 15696 .update_plane = intel_legacy_cursor_update, 15697 .disable_plane = drm_atomic_helper_disable_plane, 15698 .destroy = intel_plane_destroy, 15699 .atomic_duplicate_state = intel_plane_duplicate_state, 15700 .atomic_destroy_state = intel_plane_destroy_state, 15701 .format_mod_supported = intel_cursor_format_mod_supported, 15702 }; 15703 15704 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, 15705 enum i9xx_plane_id i9xx_plane) 15706 { 15707 if (!HAS_FBC(dev_priv)) 15708 return false; 15709 15710 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 15711 return i9xx_plane == PLANE_A; /* tied to pipe A */ 15712 else if (IS_IVYBRIDGE(dev_priv)) 15713 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B || 15714 i9xx_plane == PLANE_C; 15715 else if (INTEL_GEN(dev_priv) >= 4) 15716 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B; 15717 else 15718 return i9xx_plane == PLANE_A; 15719 } 15720 15721 static struct intel_plane * 15722 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) 15723 { 15724 struct intel_plane *plane; 15725 const struct drm_plane_funcs *plane_funcs; 15726 unsigned int supported_rotations; 15727 unsigned int possible_crtcs; 15728 const u32 *formats; 15729 int num_formats; 15730 int ret, zpos; 15731 15732 if (INTEL_GEN(dev_priv) >= 9) 15733 return skl_universal_plane_create(dev_priv, pipe, 15734 PLANE_PRIMARY); 15735 15736 plane = intel_plane_alloc(); 15737 if (IS_ERR(plane)) 15738 return plane; 15739 15740 plane->pipe = pipe; 15741 /* 15742 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 15743 * port is hooked to pipe B. Hence we want plane A feeding pipe B. 15744 */ 15745 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) 15746 plane->i9xx_plane = (enum i9xx_plane_id) !pipe; 15747 else 15748 plane->i9xx_plane = (enum i9xx_plane_id) pipe; 15749 plane->id = PLANE_PRIMARY; 15750 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); 15751 15752 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); 15753 if (plane->has_fbc) { 15754 struct intel_fbc *fbc = &dev_priv->fbc; 15755 15756 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; 15757 } 15758 15759 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15760 formats = vlv_primary_formats; 15761 num_formats = ARRAY_SIZE(vlv_primary_formats); 15762 } else if (INTEL_GEN(dev_priv) >= 4) { 15763 /* 15764 * WaFP16GammaEnabling:ivb 15765 * "Workaround : When using the 64-bit format, the plane 15766 * output on each color channel has one quarter amplitude. 
15767 * It can be brought up to full amplitude by using pipe 15768 * gamma correction or pipe color space conversion to 15769 * multiply the plane output by four." 15770 * 15771 * There is no dedicated plane gamma for the primary plane, 15772 * and using the pipe gamma/csc could conflict with other 15773 * planes, so we choose not to expose fp16 on IVB primary 15774 * planes. HSW primary planes no longer have this problem. 15775 */ 15776 if (IS_IVYBRIDGE(dev_priv)) { 15777 formats = ivb_primary_formats; 15778 num_formats = ARRAY_SIZE(ivb_primary_formats); 15779 } else { 15780 formats = i965_primary_formats; 15781 num_formats = ARRAY_SIZE(i965_primary_formats); 15782 } 15783 } else { 15784 formats = i8xx_primary_formats; 15785 num_formats = ARRAY_SIZE(i8xx_primary_formats); 15786 } 15787 15788 if (INTEL_GEN(dev_priv) >= 4) 15789 plane_funcs = &i965_plane_funcs; 15790 else 15791 plane_funcs = &i8xx_plane_funcs; 15792 15793 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15794 plane->min_cdclk = vlv_plane_min_cdclk; 15795 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 15796 plane->min_cdclk = hsw_plane_min_cdclk; 15797 else if (IS_IVYBRIDGE(dev_priv)) 15798 plane->min_cdclk = ivb_plane_min_cdclk; 15799 else 15800 plane->min_cdclk = i9xx_plane_min_cdclk; 15801 15802 plane->max_stride = i9xx_plane_max_stride; 15803 plane->update_plane = i9xx_update_plane; 15804 plane->disable_plane = i9xx_disable_plane; 15805 plane->get_hw_state = i9xx_plane_get_hw_state; 15806 plane->check_plane = i9xx_plane_check; 15807 15808 possible_crtcs = BIT(pipe); 15809 15810 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 15811 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 15812 possible_crtcs, plane_funcs, 15813 formats, num_formats, 15814 i9xx_format_modifiers, 15815 DRM_PLANE_TYPE_PRIMARY, 15816 "primary %c", pipe_name(pipe)); 15817 else 15818 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 15819 possible_crtcs, plane_funcs, 15820 formats, num_formats, 15821 i9xx_format_modifiers, 15822 DRM_PLANE_TYPE_PRIMARY, 15823 "plane %c", 15824 plane_name(plane->i9xx_plane)); 15825 if (ret) 15826 goto fail; 15827 15828 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 15829 supported_rotations = 15830 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 15831 DRM_MODE_REFLECT_X; 15832 } else if (INTEL_GEN(dev_priv) >= 4) { 15833 supported_rotations = 15834 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 15835 } else { 15836 supported_rotations = DRM_MODE_ROTATE_0; 15837 } 15838 15839 if (INTEL_GEN(dev_priv) >= 4) 15840 drm_plane_create_rotation_property(&plane->base, 15841 DRM_MODE_ROTATE_0, 15842 supported_rotations); 15843 15844 zpos = 0; 15845 drm_plane_create_zpos_immutable_property(&plane->base, zpos); 15846 15847 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); 15848 15849 return plane; 15850 15851 fail: 15852 intel_plane_free(plane); 15853 15854 return ERR_PTR(ret); 15855 } 15856 15857 static struct intel_plane * 15858 intel_cursor_plane_create(struct drm_i915_private *dev_priv, 15859 enum pipe pipe) 15860 { 15861 unsigned int possible_crtcs; 15862 struct intel_plane *cursor; 15863 int ret, zpos; 15864 15865 cursor = intel_plane_alloc(); 15866 if (IS_ERR(cursor)) 15867 return cursor; 15868 15869 cursor->pipe = pipe; 15870 cursor->i9xx_plane = (enum i9xx_plane_id) pipe; 15871 cursor->id = PLANE_CURSOR; 15872 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); 15873 15874 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 15875 cursor->max_stride = i845_cursor_max_stride; 
15876 cursor->update_plane = i845_update_cursor; 15877 cursor->disable_plane = i845_disable_cursor; 15878 cursor->get_hw_state = i845_cursor_get_hw_state; 15879 cursor->check_plane = i845_check_cursor; 15880 } else { 15881 cursor->max_stride = i9xx_cursor_max_stride; 15882 cursor->update_plane = i9xx_update_cursor; 15883 cursor->disable_plane = i9xx_disable_cursor; 15884 cursor->get_hw_state = i9xx_cursor_get_hw_state; 15885 cursor->check_plane = i9xx_check_cursor; 15886 } 15887 15888 cursor->cursor.base = ~0; 15889 cursor->cursor.cntl = ~0; 15890 15891 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 15892 cursor->cursor.size = ~0; 15893 15894 possible_crtcs = BIT(pipe); 15895 15896 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 15897 possible_crtcs, &intel_cursor_plane_funcs, 15898 intel_cursor_formats, 15899 ARRAY_SIZE(intel_cursor_formats), 15900 cursor_format_modifiers, 15901 DRM_PLANE_TYPE_CURSOR, 15902 "cursor %c", pipe_name(pipe)); 15903 if (ret) 15904 goto fail; 15905 15906 if (INTEL_GEN(dev_priv) >= 4) 15907 drm_plane_create_rotation_property(&cursor->base, 15908 DRM_MODE_ROTATE_0, 15909 DRM_MODE_ROTATE_0 | 15910 DRM_MODE_ROTATE_180); 15911 15912 zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; 15913 drm_plane_create_zpos_immutable_property(&cursor->base, zpos); 15914 15915 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 15916 15917 return cursor; 15918 15919 fail: 15920 intel_plane_free(cursor); 15921 15922 return ERR_PTR(ret); 15923 } 15924 15925 #define INTEL_CRTC_FUNCS \ 15926 .gamma_set = drm_atomic_helper_legacy_gamma_set, \ 15927 .set_config = drm_atomic_helper_set_config, \ 15928 .destroy = intel_crtc_destroy, \ 15929 .page_flip = drm_atomic_helper_page_flip, \ 15930 .atomic_duplicate_state = intel_crtc_duplicate_state, \ 15931 .atomic_destroy_state = intel_crtc_destroy_state, \ 15932 .set_crc_source = intel_crtc_set_crc_source, \ 15933 .verify_crc_source = intel_crtc_verify_crc_source, \ 15934 .get_crc_sources = intel_crtc_get_crc_sources 15935 15936 static const struct drm_crtc_funcs bdw_crtc_funcs = { 15937 INTEL_CRTC_FUNCS, 15938 15939 .get_vblank_counter = g4x_get_vblank_counter, 15940 .enable_vblank = bdw_enable_vblank, 15941 .disable_vblank = bdw_disable_vblank, 15942 }; 15943 15944 static const struct drm_crtc_funcs ilk_crtc_funcs = { 15945 INTEL_CRTC_FUNCS, 15946 15947 .get_vblank_counter = g4x_get_vblank_counter, 15948 .enable_vblank = ilk_enable_vblank, 15949 .disable_vblank = ilk_disable_vblank, 15950 }; 15951 15952 static const struct drm_crtc_funcs g4x_crtc_funcs = { 15953 INTEL_CRTC_FUNCS, 15954 15955 .get_vblank_counter = g4x_get_vblank_counter, 15956 .enable_vblank = i965_enable_vblank, 15957 .disable_vblank = i965_disable_vblank, 15958 }; 15959 15960 static const struct drm_crtc_funcs i965_crtc_funcs = { 15961 INTEL_CRTC_FUNCS, 15962 15963 .get_vblank_counter = i915_get_vblank_counter, 15964 .enable_vblank = i965_enable_vblank, 15965 .disable_vblank = i965_disable_vblank, 15966 }; 15967 15968 static const struct drm_crtc_funcs i915gm_crtc_funcs = { 15969 INTEL_CRTC_FUNCS, 15970 15971 .get_vblank_counter = i915_get_vblank_counter, 15972 .enable_vblank = i915gm_enable_vblank, 15973 .disable_vblank = i915gm_disable_vblank, 15974 }; 15975 15976 static const struct drm_crtc_funcs i915_crtc_funcs = { 15977 INTEL_CRTC_FUNCS, 15978 15979 .get_vblank_counter = i915_get_vblank_counter, 15980 .enable_vblank = i8xx_enable_vblank, 15981 .disable_vblank = i8xx_disable_vblank, 15982 }; 15983 15984 static const 
struct drm_crtc_funcs i8xx_crtc_funcs = { 15985 INTEL_CRTC_FUNCS, 15986 15987 /* no hw vblank counter */ 15988 .enable_vblank = i8xx_enable_vblank, 15989 .disable_vblank = i8xx_disable_vblank, 15990 }; 15991 15992 static struct intel_crtc *intel_crtc_alloc(void) 15993 { 15994 struct intel_crtc_state *crtc_state; 15995 struct intel_crtc *crtc; 15996 15997 crtc = kzalloc(sizeof(*crtc), GFP_KERNEL); 15998 if (!crtc) 15999 return ERR_PTR(-ENOMEM); 16000 16001 crtc_state = intel_crtc_state_alloc(crtc); 16002 if (!crtc_state) { 16003 kfree(crtc); 16004 return ERR_PTR(-ENOMEM); 16005 } 16006 16007 crtc->base.state = &crtc_state->uapi; 16008 crtc->config = crtc_state; 16009 16010 return crtc; 16011 } 16012 16013 static void intel_crtc_free(struct intel_crtc *crtc) 16014 { 16015 intel_crtc_destroy_state(&crtc->base, crtc->base.state); 16016 kfree(crtc); 16017 } 16018 16019 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) 16020 { 16021 struct intel_plane *primary, *cursor; 16022 const struct drm_crtc_funcs *funcs; 16023 struct intel_crtc *crtc; 16024 int sprite, ret; 16025 16026 crtc = intel_crtc_alloc(); 16027 if (IS_ERR(crtc)) 16028 return PTR_ERR(crtc); 16029 16030 crtc->pipe = pipe; 16031 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe]; 16032 16033 primary = intel_primary_plane_create(dev_priv, pipe); 16034 if (IS_ERR(primary)) { 16035 ret = PTR_ERR(primary); 16036 goto fail; 16037 } 16038 crtc->plane_ids_mask |= BIT(primary->id); 16039 16040 for_each_sprite(dev_priv, pipe, sprite) { 16041 struct intel_plane *plane; 16042 16043 plane = intel_sprite_plane_create(dev_priv, pipe, sprite); 16044 if (IS_ERR(plane)) { 16045 ret = PTR_ERR(plane); 16046 goto fail; 16047 } 16048 crtc->plane_ids_mask |= BIT(plane->id); 16049 } 16050 16051 cursor = intel_cursor_plane_create(dev_priv, pipe); 16052 if (IS_ERR(cursor)) { 16053 ret = PTR_ERR(cursor); 16054 goto fail; 16055 } 16056 crtc->plane_ids_mask |= BIT(cursor->id); 16057 16058 if (HAS_GMCH(dev_priv)) { 16059 if (IS_CHERRYVIEW(dev_priv) || 16060 IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv)) 16061 funcs = &g4x_crtc_funcs; 16062 else if (IS_GEN(dev_priv, 4)) 16063 funcs = &i965_crtc_funcs; 16064 else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv)) 16065 funcs = &i915gm_crtc_funcs; 16066 else if (IS_GEN(dev_priv, 3)) 16067 funcs = &i915_crtc_funcs; 16068 else 16069 funcs = &i8xx_crtc_funcs; 16070 } else { 16071 if (INTEL_GEN(dev_priv) >= 8) 16072 funcs = &bdw_crtc_funcs; 16073 else 16074 funcs = &ilk_crtc_funcs; 16075 } 16076 16077 ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base, 16078 &primary->base, &cursor->base, 16079 funcs, "pipe %c", pipe_name(pipe)); 16080 if (ret) 16081 goto fail; 16082 16083 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) || 16084 dev_priv->pipe_to_crtc_mapping[pipe] != NULL); 16085 dev_priv->pipe_to_crtc_mapping[pipe] = crtc; 16086 16087 if (INTEL_GEN(dev_priv) < 9) { 16088 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane; 16089 16090 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 16091 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL); 16092 dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc; 16093 } 16094 16095 intel_color_init(crtc); 16096 16097 WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe); 16098 16099 return 0; 16100 16101 fail: 16102 intel_crtc_free(crtc); 16103 16104 return ret; 16105 } 16106 16107 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 16108 struct drm_file *file) 16109 { 16110 struct 
drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 16111 struct drm_crtc *drmmode_crtc; 16112 struct intel_crtc *crtc; 16113 16114 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); 16115 if (!drmmode_crtc) 16116 return -ENOENT; 16117 16118 crtc = to_intel_crtc(drmmode_crtc); 16119 pipe_from_crtc_id->pipe = crtc->pipe; 16120 16121 return 0; 16122 } 16123 16124 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) 16125 { 16126 struct drm_device *dev = encoder->base.dev; 16127 struct intel_encoder *source_encoder; 16128 u32 possible_clones = 0; 16129 16130 for_each_intel_encoder(dev, source_encoder) { 16131 if (encoders_cloneable(encoder, source_encoder)) 16132 possible_clones |= drm_encoder_mask(&source_encoder->base); 16133 } 16134 16135 return possible_clones; 16136 } 16137 16138 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) 16139 { 16140 struct drm_device *dev = encoder->base.dev; 16141 struct intel_crtc *crtc; 16142 u32 possible_crtcs = 0; 16143 16144 for_each_intel_crtc(dev, crtc) { 16145 if (encoder->pipe_mask & BIT(crtc->pipe)) 16146 possible_crtcs |= drm_crtc_mask(&crtc->base); 16147 } 16148 16149 return possible_crtcs; 16150 } 16151 16152 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 16153 { 16154 if (!IS_MOBILE(dev_priv)) 16155 return false; 16156 16157 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 16158 return false; 16159 16160 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 16161 return false; 16162 16163 return true; 16164 } 16165 16166 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 16167 { 16168 if (INTEL_GEN(dev_priv) >= 9) 16169 return false; 16170 16171 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 16172 return false; 16173 16174 if (HAS_PCH_LPT_H(dev_priv) && 16175 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 16176 return false; 16177 16178 /* DDI E can't be used if DDI A requires 4 lanes */ 16179 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 16180 return false; 16181 16182 if (!dev_priv->vbt.int_crt_support) 16183 return false; 16184 16185 return true; 16186 } 16187 16188 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) 16189 { 16190 int pps_num; 16191 int pps_idx; 16192 16193 if (HAS_DDI(dev_priv)) 16194 return; 16195 /* 16196 * This w/a is needed at least on CPT/PPT, but to be sure apply it 16197 * everywhere where registers can be write protected. 
16198 */ 16199 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 16200 pps_num = 2; 16201 else 16202 pps_num = 1; 16203 16204 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { 16205 u32 val = I915_READ(PP_CONTROL(pps_idx)); 16206 16207 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; 16208 I915_WRITE(PP_CONTROL(pps_idx), val); 16209 } 16210 } 16211 16212 static void intel_pps_init(struct drm_i915_private *dev_priv) 16213 { 16214 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv)) 16215 dev_priv->pps_mmio_base = PCH_PPS_BASE; 16216 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 16217 dev_priv->pps_mmio_base = VLV_PPS_BASE; 16218 else 16219 dev_priv->pps_mmio_base = PPS_BASE; 16220 16221 intel_pps_unlock_regs_wa(dev_priv); 16222 } 16223 16224 static void intel_setup_outputs(struct drm_i915_private *dev_priv) 16225 { 16226 struct intel_encoder *encoder; 16227 bool dpd_is_edp = false; 16228 16229 intel_pps_init(dev_priv); 16230 16231 if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) 16232 return; 16233 16234 if (INTEL_GEN(dev_priv) >= 12) { 16235 intel_ddi_init(dev_priv, PORT_A); 16236 intel_ddi_init(dev_priv, PORT_B); 16237 intel_ddi_init(dev_priv, PORT_D); 16238 intel_ddi_init(dev_priv, PORT_E); 16239 intel_ddi_init(dev_priv, PORT_F); 16240 intel_ddi_init(dev_priv, PORT_G); 16241 intel_ddi_init(dev_priv, PORT_H); 16242 intel_ddi_init(dev_priv, PORT_I); 16243 icl_dsi_init(dev_priv); 16244 } else if (IS_ELKHARTLAKE(dev_priv)) { 16245 intel_ddi_init(dev_priv, PORT_A); 16246 intel_ddi_init(dev_priv, PORT_B); 16247 intel_ddi_init(dev_priv, PORT_C); 16248 intel_ddi_init(dev_priv, PORT_D); 16249 icl_dsi_init(dev_priv); 16250 } else if (IS_GEN(dev_priv, 11)) { 16251 intel_ddi_init(dev_priv, PORT_A); 16252 intel_ddi_init(dev_priv, PORT_B); 16253 intel_ddi_init(dev_priv, PORT_C); 16254 intel_ddi_init(dev_priv, PORT_D); 16255 intel_ddi_init(dev_priv, PORT_E); 16256 /* 16257 * On some ICL SKUs port F is not present. No strap bits for 16258 * this, so rely on VBT. 16259 * Work around broken VBTs on SKUs known to have no port F. 16260 */ 16261 if (IS_ICL_WITH_PORT_F(dev_priv) && 16262 intel_bios_is_port_present(dev_priv, PORT_F)) 16263 intel_ddi_init(dev_priv, PORT_F); 16264 16265 icl_dsi_init(dev_priv); 16266 } else if (IS_GEN9_LP(dev_priv)) { 16267 /* 16268 * FIXME: Broxton doesn't support port detection via the 16269 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 16270 * detect the ports. 16271 */ 16272 intel_ddi_init(dev_priv, PORT_A); 16273 intel_ddi_init(dev_priv, PORT_B); 16274 intel_ddi_init(dev_priv, PORT_C); 16275 16276 vlv_dsi_init(dev_priv); 16277 } else if (HAS_DDI(dev_priv)) { 16278 int found; 16279 16280 if (intel_ddi_crt_present(dev_priv)) 16281 intel_crt_init(dev_priv); 16282 16283 /* 16284 * Haswell uses DDI functions to detect digital outputs. 16285 * On SKL pre-D0 the strap isn't connected, so we assume 16286 * it's there. 
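	 * That is what the IS_GEN9_BC() fallback on the strap read
	 * below (WaIgnoreDDIAStrap) is about.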
16287 */ 16288 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 16289 /* WaIgnoreDDIAStrap: skl */ 16290 if (found || IS_GEN9_BC(dev_priv)) 16291 intel_ddi_init(dev_priv, PORT_A); 16292 16293 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP 16294 * register */ 16295 found = I915_READ(SFUSE_STRAP); 16296 16297 if (found & SFUSE_STRAP_DDIB_DETECTED) 16298 intel_ddi_init(dev_priv, PORT_B); 16299 if (found & SFUSE_STRAP_DDIC_DETECTED) 16300 intel_ddi_init(dev_priv, PORT_C); 16301 if (found & SFUSE_STRAP_DDID_DETECTED) 16302 intel_ddi_init(dev_priv, PORT_D); 16303 if (found & SFUSE_STRAP_DDIF_DETECTED) 16304 intel_ddi_init(dev_priv, PORT_F); 16305 /* 16306 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 16307 */ 16308 if (IS_GEN9_BC(dev_priv) && 16309 intel_bios_is_port_present(dev_priv, PORT_E)) 16310 intel_ddi_init(dev_priv, PORT_E); 16311 16312 } else if (HAS_PCH_SPLIT(dev_priv)) { 16313 int found; 16314 16315 /* 16316 * intel_edp_init_connector() depends on this completing first, 16317 * to prevent the registration of both eDP and LVDS and the 16318 * incorrect sharing of the PPS. 16319 */ 16320 intel_lvds_init(dev_priv); 16321 intel_crt_init(dev_priv); 16322 16323 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); 16324 16325 if (ilk_has_edp_a(dev_priv)) 16326 intel_dp_init(dev_priv, DP_A, PORT_A); 16327 16328 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 16329 /* PCH SDVOB multiplex with HDMIB */ 16330 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 16331 if (!found) 16332 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 16333 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 16334 intel_dp_init(dev_priv, PCH_DP_B, PORT_B); 16335 } 16336 16337 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 16338 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 16339 16340 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 16341 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 16342 16343 if (I915_READ(PCH_DP_C) & DP_DETECTED) 16344 intel_dp_init(dev_priv, PCH_DP_C, PORT_C); 16345 16346 if (I915_READ(PCH_DP_D) & DP_DETECTED) 16347 intel_dp_init(dev_priv, PCH_DP_D, PORT_D); 16348 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 16349 bool has_edp, has_port; 16350 16351 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support) 16352 intel_crt_init(dev_priv); 16353 16354 /* 16355 * The DP_DETECTED bit is the latched state of the DDC 16356 * SDA pin at boot. However since eDP doesn't require DDC 16357 * (no way to plug in a DP->HDMI dongle) the DDC pins for 16358 * eDP ports may have been muxed to an alternate function. 16359 * Thus we can't rely on the DP_DETECTED bit alone to detect 16360 * eDP ports. Consult the VBT as well as DP_DETECTED to 16361 * detect eDP ports. 16362 * 16363 * Sadly the straps seem to be missing sometimes even for HDMI 16364 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap 16365 * and VBT for the presence of the port. Additionally we can't 16366 * trust the port type the VBT declares as we've seen at least 16367 * HDMI ports that the VBT claim are DP or eDP. 
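	 * Hence each port below is probed as DP first, and HDMI is
	 * only registered if no eDP connector claimed the port.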
16368 */ 16369 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 16370 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 16371 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) 16372 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); 16373 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 16374 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 16375 16376 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 16377 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 16378 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) 16379 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); 16380 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 16381 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 16382 16383 if (IS_CHERRYVIEW(dev_priv)) { 16384 /* 16385 * eDP not supported on port D, 16386 * so no need to worry about it 16387 */ 16388 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 16389 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) 16390 intel_dp_init(dev_priv, CHV_DP_D, PORT_D); 16391 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) 16392 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 16393 } 16394 16395 vlv_dsi_init(dev_priv); 16396 } else if (IS_PINEVIEW(dev_priv)) { 16397 intel_lvds_init(dev_priv); 16398 intel_crt_init(dev_priv); 16399 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { 16400 bool found = false; 16401 16402 if (IS_MOBILE(dev_priv)) 16403 intel_lvds_init(dev_priv); 16404 16405 intel_crt_init(dev_priv); 16406 16407 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 16408 DRM_DEBUG_KMS("probing SDVOB\n"); 16409 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 16410 if (!found && IS_G4X(dev_priv)) { 16411 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 16412 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 16413 } 16414 16415 if (!found && IS_G4X(dev_priv)) 16416 intel_dp_init(dev_priv, DP_B, PORT_B); 16417 } 16418 16419 /* Before G4X SDVOC doesn't have its own detect register */ 16420 16421 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 16422 DRM_DEBUG_KMS("probing SDVOC\n"); 16423 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 16424 } 16425 16426 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 16427 16428 if (IS_G4X(dev_priv)) { 16429 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 16430 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 16431 } 16432 if (IS_G4X(dev_priv)) 16433 intel_dp_init(dev_priv, DP_C, PORT_C); 16434 } 16435 16436 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) 16437 intel_dp_init(dev_priv, DP_D, PORT_D); 16438 16439 if (SUPPORTS_TV(dev_priv)) 16440 intel_tv_init(dev_priv); 16441 } else if (IS_GEN(dev_priv, 2)) { 16442 if (IS_I85X(dev_priv)) 16443 intel_lvds_init(dev_priv); 16444 16445 intel_crt_init(dev_priv); 16446 intel_dvo_init(dev_priv); 16447 } 16448 16449 intel_psr_init(dev_priv); 16450 16451 for_each_intel_encoder(&dev_priv->drm, encoder) { 16452 encoder->base.possible_crtcs = 16453 intel_encoder_possible_crtcs(encoder); 16454 encoder->base.possible_clones = 16455 intel_encoder_possible_clones(encoder); 16456 } 16457 16458 intel_init_pch_refclk(dev_priv); 16459 16460 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 16461 } 16462 16463 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 16464 { 16465 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 16466 16467 drm_framebuffer_cleanup(fb); 16468 intel_frontbuffer_put(intel_fb->frontbuffer); 16469 16470 kfree(intel_fb); 16471 } 16472 16473 static int intel_user_framebuffer_create_handle(struct 
drm_framebuffer *fb, 16474 struct drm_file *file, 16475 unsigned int *handle) 16476 { 16477 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 16478 16479 if (obj->userptr.mm) { 16480 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); 16481 return -EINVAL; 16482 } 16483 16484 return drm_gem_handle_create(file, &obj->base, handle); 16485 } 16486 16487 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, 16488 struct drm_file *file, 16489 unsigned flags, unsigned color, 16490 struct drm_clip_rect *clips, 16491 unsigned num_clips) 16492 { 16493 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 16494 16495 i915_gem_object_flush_if_display(obj); 16496 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); 16497 16498 return 0; 16499 } 16500 16501 static const struct drm_framebuffer_funcs intel_fb_funcs = { 16502 .destroy = intel_user_framebuffer_destroy, 16503 .create_handle = intel_user_framebuffer_create_handle, 16504 .dirty = intel_user_framebuffer_dirty, 16505 }; 16506 16507 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, 16508 struct drm_i915_gem_object *obj, 16509 struct drm_mode_fb_cmd2 *mode_cmd) 16510 { 16511 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 16512 struct drm_framebuffer *fb = &intel_fb->base; 16513 u32 max_stride; 16514 unsigned int tiling, stride; 16515 int ret = -EINVAL; 16516 int i; 16517 16518 intel_fb->frontbuffer = intel_frontbuffer_get(obj); 16519 if (!intel_fb->frontbuffer) 16520 return -ENOMEM; 16521 16522 i915_gem_object_lock(obj); 16523 tiling = i915_gem_object_get_tiling(obj); 16524 stride = i915_gem_object_get_stride(obj); 16525 i915_gem_object_unlock(obj); 16526 16527 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 16528 /* 16529 * If there's a fence, enforce that 16530 * the fb modifier and tiling mode match. 16531 */ 16532 if (tiling != I915_TILING_NONE && 16533 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 16534 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n"); 16535 goto err; 16536 } 16537 } else { 16538 if (tiling == I915_TILING_X) { 16539 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 16540 } else if (tiling == I915_TILING_Y) { 16541 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n"); 16542 goto err; 16543 } 16544 } 16545 16546 if (!drm_any_plane_has_format(&dev_priv->drm, 16547 mode_cmd->pixel_format, 16548 mode_cmd->modifier[0])) { 16549 struct drm_format_name_buf format_name; 16550 16551 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n", 16552 drm_get_format_name(mode_cmd->pixel_format, 16553 &format_name), 16554 mode_cmd->modifier[0]); 16555 goto err; 16556 } 16557 16558 /* 16559 * gen2/3 display engine uses the fence if present, 16560 * so the tiling mode must match the fb modifier exactly. 16561 */ 16562 if (INTEL_GEN(dev_priv) < 4 && 16563 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 16564 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n"); 16565 goto err; 16566 } 16567 16568 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format, 16569 mode_cmd->modifier[0]); 16570 if (mode_cmd->pitches[0] > max_stride) { 16571 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", 16572 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 16573 "tiled" : "linear", 16574 mode_cmd->pitches[0], max_stride); 16575 goto err; 16576 } 16577 16578 /* 16579 * If there's a fence, enforce that 16580 * the fb pitch and fence stride match. 
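	 * (both the tiling mode and the stride were sampled under the
	 * object lock earlier in this function)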
16581 */ 16582 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { 16583 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n", 16584 mode_cmd->pitches[0], stride); 16585 goto err; 16586 } 16587 16588 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 16589 if (mode_cmd->offsets[0] != 0) 16590 goto err; 16591 16592 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); 16593 16594 for (i = 0; i < fb->format->num_planes; i++) { 16595 u32 stride_alignment; 16596 16597 if (mode_cmd->handles[i] != mode_cmd->handles[0]) { 16598 DRM_DEBUG_KMS("bad plane %d handle\n", i); 16599 goto err; 16600 } 16601 16602 stride_alignment = intel_fb_stride_alignment(fb, i); 16603 if (fb->pitches[i] & (stride_alignment - 1)) { 16604 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n", 16605 i, fb->pitches[i], stride_alignment); 16606 goto err; 16607 } 16608 16609 if (is_gen12_ccs_plane(fb, i)) { 16610 int ccs_aux_stride = gen12_ccs_aux_stride(fb, i); 16611 16612 if (fb->pitches[i] != ccs_aux_stride) { 16613 DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n", 16614 i, 16615 fb->pitches[i], ccs_aux_stride); 16616 goto err; 16617 } 16618 } 16619 16620 fb->obj[i] = &obj->base; 16621 } 16622 16623 ret = intel_fill_fb_info(dev_priv, fb); 16624 if (ret) 16625 goto err; 16626 16627 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); 16628 if (ret) { 16629 DRM_ERROR("framebuffer init failed %d\n", ret); 16630 goto err; 16631 } 16632 16633 return 0; 16634 16635 err: 16636 intel_frontbuffer_put(intel_fb->frontbuffer); 16637 return ret; 16638 } 16639 16640 static struct drm_framebuffer * 16641 intel_user_framebuffer_create(struct drm_device *dev, 16642 struct drm_file *filp, 16643 const struct drm_mode_fb_cmd2 *user_mode_cmd) 16644 { 16645 struct drm_framebuffer *fb; 16646 struct drm_i915_gem_object *obj; 16647 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 16648 16649 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); 16650 if (!obj) 16651 return ERR_PTR(-ENOENT); 16652 16653 fb = intel_framebuffer_create(obj, &mode_cmd); 16654 i915_gem_object_put(obj); 16655 16656 return fb; 16657 } 16658 16659 static void intel_atomic_state_free(struct drm_atomic_state *state) 16660 { 16661 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 16662 16663 drm_atomic_state_default_release(state); 16664 16665 i915_sw_fence_fini(&intel_state->commit_ready); 16666 16667 kfree(state); 16668 } 16669 16670 static enum drm_mode_status 16671 intel_mode_valid(struct drm_device *dev, 16672 const struct drm_display_mode *mode) 16673 { 16674 struct drm_i915_private *dev_priv = to_i915(dev); 16675 int hdisplay_max, htotal_max; 16676 int vdisplay_max, vtotal_max; 16677 16678 /* 16679 * Can't reject DBLSCAN here because Xorg ddxen can add piles 16680 * of DBLSCAN modes to the output's mode list when they detect 16681 * the scaling mode property on the connector. And they don't 16682 * ask the kernel to validate those modes in any way until 16683 * modeset time at which point the client gets a protocol error. 16684 * So in order to not upset those clients we silently ignore the 16685 * DBLSCAN flag on such connectors. For other connectors we will 16686 * reject modes with the DBLSCAN flag in encoder->compute_config(). 16687 * And we always reject DBLSCAN modes in connector->mode_valid() 16688 * as we never want such modes on the connector's mode list. 
 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (INTEL_GEN(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	if (INTEL_GEN(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}

enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
16778 */ 16779 if (INTEL_GEN(dev_priv) >= 11) { 16780 plane_width_max = 5120; 16781 plane_height_max = 4320; 16782 } else { 16783 plane_width_max = 5120; 16784 plane_height_max = 4096; 16785 } 16786 16787 if (mode->hdisplay > plane_width_max) 16788 return MODE_H_ILLEGAL; 16789 16790 if (mode->vdisplay > plane_height_max) 16791 return MODE_V_ILLEGAL; 16792 16793 return MODE_OK; 16794 } 16795 16796 static const struct drm_mode_config_funcs intel_mode_funcs = { 16797 .fb_create = intel_user_framebuffer_create, 16798 .get_format_info = intel_get_format_info, 16799 .output_poll_changed = intel_fbdev_output_poll_changed, 16800 .mode_valid = intel_mode_valid, 16801 .atomic_check = intel_atomic_check, 16802 .atomic_commit = intel_atomic_commit, 16803 .atomic_state_alloc = intel_atomic_state_alloc, 16804 .atomic_state_clear = intel_atomic_state_clear, 16805 .atomic_state_free = intel_atomic_state_free, 16806 }; 16807 16808 /** 16809 * intel_init_display_hooks - initialize the display modesetting hooks 16810 * @dev_priv: device private 16811 */ 16812 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 16813 { 16814 intel_init_cdclk_hooks(dev_priv); 16815 16816 if (INTEL_GEN(dev_priv) >= 9) { 16817 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 16818 dev_priv->display.get_initial_plane_config = 16819 skylake_get_initial_plane_config; 16820 dev_priv->display.crtc_compute_clock = 16821 haswell_crtc_compute_clock; 16822 dev_priv->display.crtc_enable = haswell_crtc_enable; 16823 dev_priv->display.crtc_disable = haswell_crtc_disable; 16824 } else if (HAS_DDI(dev_priv)) { 16825 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 16826 dev_priv->display.get_initial_plane_config = 16827 i9xx_get_initial_plane_config; 16828 dev_priv->display.crtc_compute_clock = 16829 haswell_crtc_compute_clock; 16830 dev_priv->display.crtc_enable = haswell_crtc_enable; 16831 dev_priv->display.crtc_disable = haswell_crtc_disable; 16832 } else if (HAS_PCH_SPLIT(dev_priv)) { 16833 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 16834 dev_priv->display.get_initial_plane_config = 16835 i9xx_get_initial_plane_config; 16836 dev_priv->display.crtc_compute_clock = 16837 ironlake_crtc_compute_clock; 16838 dev_priv->display.crtc_enable = ironlake_crtc_enable; 16839 dev_priv->display.crtc_disable = ironlake_crtc_disable; 16840 } else if (IS_CHERRYVIEW(dev_priv)) { 16841 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16842 dev_priv->display.get_initial_plane_config = 16843 i9xx_get_initial_plane_config; 16844 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; 16845 dev_priv->display.crtc_enable = valleyview_crtc_enable; 16846 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16847 } else if (IS_VALLEYVIEW(dev_priv)) { 16848 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16849 dev_priv->display.get_initial_plane_config = 16850 i9xx_get_initial_plane_config; 16851 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; 16852 dev_priv->display.crtc_enable = valleyview_crtc_enable; 16853 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16854 } else if (IS_G4X(dev_priv)) { 16855 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16856 dev_priv->display.get_initial_plane_config = 16857 i9xx_get_initial_plane_config; 16858 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; 16859 dev_priv->display.crtc_enable = i9xx_crtc_enable; 16860 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16861 } else if (IS_PINEVIEW(dev_priv)) { 16862 
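		/*
		 * Pineview shares the i9xx hooks but needs its own
		 * DPLL clock computation (pnv_crtc_compute_clock below).
		 */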
dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16863 dev_priv->display.get_initial_plane_config = 16864 i9xx_get_initial_plane_config; 16865 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 16866 dev_priv->display.crtc_enable = i9xx_crtc_enable; 16867 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16868 } else if (!IS_GEN(dev_priv, 2)) { 16869 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16870 dev_priv->display.get_initial_plane_config = 16871 i9xx_get_initial_plane_config; 16872 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 16873 dev_priv->display.crtc_enable = i9xx_crtc_enable; 16874 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16875 } else { 16876 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 16877 dev_priv->display.get_initial_plane_config = 16878 i9xx_get_initial_plane_config; 16879 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; 16880 dev_priv->display.crtc_enable = i9xx_crtc_enable; 16881 dev_priv->display.crtc_disable = i9xx_crtc_disable; 16882 } 16883 16884 if (IS_GEN(dev_priv, 5)) { 16885 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 16886 } else if (IS_GEN(dev_priv, 6)) { 16887 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 16888 } else if (IS_IVYBRIDGE(dev_priv)) { 16889 /* FIXME: detect B0+ stepping and use auto training */ 16890 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 16891 } 16892 16893 if (INTEL_GEN(dev_priv) >= 9) 16894 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables; 16895 else 16896 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables; 16897 16898 } 16899 16900 void intel_modeset_init_hw(struct drm_i915_private *i915) 16901 { 16902 intel_update_cdclk(i915); 16903 intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK"); 16904 i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw; 16905 } 16906 16907 /* 16908 * Calculate what we think the watermarks should be for the state we've read 16909 * out of the hardware and then immediately program those watermarks so that 16910 * we ensure the hardware settings match our internal state. 16911 * 16912 * We can calculate what we think WM's should be by creating a duplicate of the 16913 * current state (which was constructed during hardware readout) and running it 16914 * through the atomic check code to calculate new watermark values in the 16915 * state object. 16916 */ 16917 static void sanitize_watermarks(struct drm_device *dev) 16918 { 16919 struct drm_i915_private *dev_priv = to_i915(dev); 16920 struct drm_atomic_state *state; 16921 struct intel_atomic_state *intel_state; 16922 struct intel_crtc *crtc; 16923 struct intel_crtc_state *crtc_state; 16924 struct drm_modeset_acquire_ctx ctx; 16925 int ret; 16926 int i; 16927 16928 /* Only supported on platforms that use atomic watermark design */ 16929 if (!dev_priv->display.optimize_watermarks) 16930 return; 16931 16932 /* 16933 * We need to hold connection_mutex before calling duplicate_state so 16934 * that the connector loop is protected. 
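	 * For simplicity just take all the modeset locks below and
	 * retry on -EDEADLK like a regular atomic commit would.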
16935 */ 16936 drm_modeset_acquire_init(&ctx, 0); 16937 retry: 16938 ret = drm_modeset_lock_all_ctx(dev, &ctx); 16939 if (ret == -EDEADLK) { 16940 drm_modeset_backoff(&ctx); 16941 goto retry; 16942 } else if (WARN_ON(ret)) { 16943 goto fail; 16944 } 16945 16946 state = drm_atomic_helper_duplicate_state(dev, &ctx); 16947 if (WARN_ON(IS_ERR(state))) 16948 goto fail; 16949 16950 intel_state = to_intel_atomic_state(state); 16951 16952 /* 16953 * Hardware readout is the only time we don't want to calculate 16954 * intermediate watermarks (since we don't trust the current 16955 * watermarks). 16956 */ 16957 if (!HAS_GMCH(dev_priv)) 16958 intel_state->skip_intermediate_wm = true; 16959 16960 ret = intel_atomic_check(dev, state); 16961 if (ret) { 16962 /* 16963 * If we fail here, it means that the hardware appears to be 16964 * programmed in a way that shouldn't be possible, given our 16965 * understanding of watermark requirements. This might mean a 16966 * mistake in the hardware readout code or a mistake in the 16967 * watermark calculations for a given platform. Raise a WARN 16968 * so that this is noticeable. 16969 * 16970 * If this actually happens, we'll have to just leave the 16971 * BIOS-programmed watermarks untouched and hope for the best. 16972 */ 16973 WARN(true, "Could not determine valid watermarks for inherited state\n"); 16974 goto put_state; 16975 } 16976 16977 /* Write calculated watermark values back */ 16978 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { 16979 crtc_state->wm.need_postvbl_update = true; 16980 dev_priv->display.optimize_watermarks(intel_state, crtc); 16981 16982 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; 16983 } 16984 16985 put_state: 16986 drm_atomic_state_put(state); 16987 fail: 16988 drm_modeset_drop_locks(&ctx); 16989 drm_modeset_acquire_fini(&ctx); 16990 } 16991 16992 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) 16993 { 16994 if (IS_GEN(dev_priv, 5)) { 16995 u32 fdi_pll_clk = 16996 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; 16997 16998 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; 16999 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) { 17000 dev_priv->fdi_pll_freq = 270000; 17001 } else { 17002 return; 17003 } 17004 17005 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); 17006 } 17007 17008 static int intel_initial_commit(struct drm_device *dev) 17009 { 17010 struct drm_atomic_state *state = NULL; 17011 struct drm_modeset_acquire_ctx ctx; 17012 struct intel_crtc *crtc; 17013 int ret = 0; 17014 17015 state = drm_atomic_state_alloc(dev); 17016 if (!state) 17017 return -ENOMEM; 17018 17019 drm_modeset_acquire_init(&ctx, 0); 17020 17021 retry: 17022 state->acquire_ctx = &ctx; 17023 17024 for_each_intel_crtc(dev, crtc) { 17025 struct intel_crtc_state *crtc_state = 17026 intel_atomic_get_crtc_state(state, crtc); 17027 17028 if (IS_ERR(crtc_state)) { 17029 ret = PTR_ERR(crtc_state); 17030 goto out; 17031 } 17032 17033 if (crtc_state->hw.active) { 17034 ret = drm_atomic_add_affected_planes(state, &crtc->base); 17035 if (ret) 17036 goto out; 17037 17038 /* 17039 * FIXME hack to force a LUT update to avoid the 17040 * plane update forcing the pipe gamma on without 17041 * having a proper LUT loaded. Remove once we 17042 * have readout for pipe gamma enable. 
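	 * Setting color_mgmt_changed below makes this initial commit
	 * also write the LUTs, not just the planes.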
17043 */ 17044 crtc_state->uapi.color_mgmt_changed = true; 17045 } 17046 } 17047 17048 ret = drm_atomic_commit(state); 17049 17050 out: 17051 if (ret == -EDEADLK) { 17052 drm_atomic_state_clear(state); 17053 drm_modeset_backoff(&ctx); 17054 goto retry; 17055 } 17056 17057 drm_atomic_state_put(state); 17058 17059 drm_modeset_drop_locks(&ctx); 17060 drm_modeset_acquire_fini(&ctx); 17061 17062 return ret; 17063 } 17064 17065 static void intel_mode_config_init(struct drm_i915_private *i915) 17066 { 17067 struct drm_mode_config *mode_config = &i915->drm.mode_config; 17068 17069 drm_mode_config_init(&i915->drm); 17070 17071 mode_config->min_width = 0; 17072 mode_config->min_height = 0; 17073 17074 mode_config->preferred_depth = 24; 17075 mode_config->prefer_shadow = 1; 17076 17077 mode_config->allow_fb_modifiers = true; 17078 17079 mode_config->funcs = &intel_mode_funcs; 17080 17081 /* 17082 * Maximum framebuffer dimensions, chosen to match 17083 * the maximum render engine surface size on gen4+. 17084 */ 17085 if (INTEL_GEN(i915) >= 7) { 17086 mode_config->max_width = 16384; 17087 mode_config->max_height = 16384; 17088 } else if (INTEL_GEN(i915) >= 4) { 17089 mode_config->max_width = 8192; 17090 mode_config->max_height = 8192; 17091 } else if (IS_GEN(i915, 3)) { 17092 mode_config->max_width = 4096; 17093 mode_config->max_height = 4096; 17094 } else { 17095 mode_config->max_width = 2048; 17096 mode_config->max_height = 2048; 17097 } 17098 17099 if (IS_I845G(i915) || IS_I865G(i915)) { 17100 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512; 17101 mode_config->cursor_height = 1023; 17102 } else if (IS_GEN(i915, 2)) { 17103 mode_config->cursor_width = 64; 17104 mode_config->cursor_height = 64; 17105 } else { 17106 mode_config->cursor_width = 256; 17107 mode_config->cursor_height = 256; 17108 } 17109 } 17110 17111 int intel_modeset_init(struct drm_i915_private *i915) 17112 { 17113 struct drm_device *dev = &i915->drm; 17114 enum pipe pipe; 17115 struct intel_crtc *crtc; 17116 int ret; 17117 17118 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); 17119 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | 17120 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); 17121 17122 intel_mode_config_init(i915); 17123 17124 ret = intel_bw_init(i915); 17125 if (ret) 17126 return ret; 17127 17128 init_llist_head(&i915->atomic_helper.free_list); 17129 INIT_WORK(&i915->atomic_helper.free_work, 17130 intel_atomic_helper_free_state_worker); 17131 17132 intel_init_quirks(i915); 17133 17134 intel_fbc_init(i915); 17135 17136 intel_init_pm(i915); 17137 17138 intel_panel_sanitize_ssc(i915); 17139 17140 intel_gmbus_setup(i915); 17141 17142 DRM_DEBUG_KMS("%d display pipe%s available.\n", 17143 INTEL_NUM_PIPES(i915), 17144 INTEL_NUM_PIPES(i915) > 1 ? 
"s" : ""); 17145 17146 if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { 17147 for_each_pipe(i915, pipe) { 17148 ret = intel_crtc_init(i915, pipe); 17149 if (ret) { 17150 drm_mode_config_cleanup(dev); 17151 return ret; 17152 } 17153 } 17154 } 17155 17156 intel_shared_dpll_init(dev); 17157 intel_update_fdi_pll_freq(i915); 17158 17159 intel_update_czclk(i915); 17160 intel_modeset_init_hw(i915); 17161 17162 intel_hdcp_component_init(i915); 17163 17164 if (i915->max_cdclk_freq == 0) 17165 intel_update_max_cdclk(i915); 17166 17167 /* Just disable it once at startup */ 17168 intel_vga_disable(i915); 17169 intel_setup_outputs(i915); 17170 17171 drm_modeset_lock_all(dev); 17172 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 17173 drm_modeset_unlock_all(dev); 17174 17175 for_each_intel_crtc(dev, crtc) { 17176 struct intel_initial_plane_config plane_config = {}; 17177 17178 if (!crtc->active) 17179 continue; 17180 17181 /* 17182 * Note that reserving the BIOS fb up front prevents us 17183 * from stuffing other stolen allocations like the ring 17184 * on top. This prevents some ugliness at boot time, and 17185 * can even allow for smooth boot transitions if the BIOS 17186 * fb is large enough for the active pipe configuration. 17187 */ 17188 i915->display.get_initial_plane_config(crtc, &plane_config); 17189 17190 /* 17191 * If the fb is shared between multiple heads, we'll 17192 * just get the first one. 17193 */ 17194 intel_find_initial_plane_obj(crtc, &plane_config); 17195 } 17196 17197 /* 17198 * Make sure hardware watermarks really match the state we read out. 17199 * Note that we need to do this after reconstructing the BIOS fb's 17200 * since the watermark calculation done here will use pstate->fb. 17201 */ 17202 if (!HAS_GMCH(i915)) 17203 sanitize_watermarks(dev); 17204 17205 /* 17206 * Force all active planes to recompute their states. So that on 17207 * mode_setcrtc after probe, all the intel_plane_state variables 17208 * are already calculated and there is no assert_plane warnings 17209 * during bootup. 
17210 */ 17211 ret = intel_initial_commit(dev); 17212 if (ret) 17213 DRM_DEBUG_KMS("Initial commit in probe failed.\n"); 17214 17215 return 0; 17216 } 17217 17218 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 17219 { 17220 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17221 /* 640x480@60Hz, ~25175 kHz */ 17222 struct dpll clock = { 17223 .m1 = 18, 17224 .m2 = 7, 17225 .p1 = 13, 17226 .p2 = 4, 17227 .n = 2, 17228 }; 17229 u32 dpll, fp; 17230 int i; 17231 17232 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154); 17233 17234 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 17235 pipe_name(pipe), clock.vco, clock.dot); 17236 17237 fp = i9xx_dpll_compute_fp(&clock); 17238 dpll = DPLL_DVO_2X_MODE | 17239 DPLL_VGA_MODE_DIS | 17240 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 17241 PLL_P2_DIVIDE_BY_4 | 17242 PLL_REF_INPUT_DREFCLK | 17243 DPLL_VCO_ENABLE; 17244 17245 I915_WRITE(FP0(pipe), fp); 17246 I915_WRITE(FP1(pipe), fp); 17247 17248 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); 17249 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); 17250 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); 17251 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); 17252 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); 17253 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); 17254 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); 17255 17256 /* 17257 * Apparently we need to have VGA mode enabled prior to changing 17258 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 17259 * dividers, even though the register value does change. 17260 */ 17261 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); 17262 I915_WRITE(DPLL(pipe), dpll); 17263 17264 /* Wait for the clocks to stabilize. */ 17265 POSTING_READ(DPLL(pipe)); 17266 udelay(150); 17267 17268 /* The pixel multiplier can only be updated once the 17269 * DPLL is enabled and the clocks are stable. 17270 * 17271 * So write it again. 
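	 * (the DPLL enable above was already posted and given 150 us
	 * to stabilize before we get here)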
17272 */ 17273 I915_WRITE(DPLL(pipe), dpll); 17274 17275 /* We do this three times for luck */ 17276 for (i = 0; i < 3 ; i++) { 17277 I915_WRITE(DPLL(pipe), dpll); 17278 POSTING_READ(DPLL(pipe)); 17279 udelay(150); /* wait for warmup */ 17280 } 17281 17282 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE); 17283 POSTING_READ(PIPECONF(pipe)); 17284 17285 intel_wait_for_pipe_scanline_moving(crtc); 17286 } 17287 17288 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 17289 { 17290 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17291 17292 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", 17293 pipe_name(pipe)); 17294 17295 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE); 17296 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE); 17297 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE); 17298 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE); 17299 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE); 17300 17301 I915_WRITE(PIPECONF(pipe), 0); 17302 POSTING_READ(PIPECONF(pipe)); 17303 17304 intel_wait_for_pipe_scanline_stopped(crtc); 17305 17306 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); 17307 POSTING_READ(DPLL(pipe)); 17308 } 17309 17310 static void 17311 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) 17312 { 17313 struct intel_crtc *crtc; 17314 17315 if (INTEL_GEN(dev_priv) >= 4) 17316 return; 17317 17318 for_each_intel_crtc(&dev_priv->drm, crtc) { 17319 struct intel_plane *plane = 17320 to_intel_plane(crtc->base.primary); 17321 struct intel_crtc *plane_crtc; 17322 enum pipe pipe; 17323 17324 if (!plane->get_hw_state(plane, &pipe)) 17325 continue; 17326 17327 if (pipe == crtc->pipe) 17328 continue; 17329 17330 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", 17331 plane->base.base.id, plane->base.name); 17332 17333 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17334 intel_plane_disable_noatomic(plane_crtc, plane); 17335 } 17336 } 17337 17338 static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 17339 { 17340 struct drm_device *dev = crtc->base.dev; 17341 struct intel_encoder *encoder; 17342 17343 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 17344 return true; 17345 17346 return false; 17347 } 17348 17349 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) 17350 { 17351 struct drm_device *dev = encoder->base.dev; 17352 struct intel_connector *connector; 17353 17354 for_each_connector_on_encoder(dev, &encoder->base, connector) 17355 return connector; 17356 17357 return NULL; 17358 } 17359 17360 static bool has_pch_trancoder(struct drm_i915_private *dev_priv, 17361 enum pipe pch_transcoder) 17362 { 17363 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 17364 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A); 17365 } 17366 17367 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state) 17368 { 17369 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 17370 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 17371 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 17372 17373 if (INTEL_GEN(dev_priv) >= 9 || 17374 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 17375 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); 17376 u32 val; 17377 17378 if (transcoder_is_dsi(cpu_transcoder)) 17379 return; 17380 17381 val = I915_READ(reg); 17382 val &= ~HSW_FRAME_START_DELAY_MASK; 17383 val |= HSW_FRAME_START_DELAY(0); 17384 I915_WRITE(reg, 
val);
	} else {
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = I915_READ(reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	} else {
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}
}

static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}

static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/*
	 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
	 * the hardware when a high res display is plugged in. The DPLL P
	 * divider is zero, and the pipe timings are bonkers. We'll
	 * try to disable everything in that case.
	 *
	 * FIXME would be nice to be able to sanitize this state
	 * without several WARNs, but for now let's take the easy
	 * road.
	 */
	return IS_GEN(dev_priv, 6) &&
		crtc_state->hw.active &&
		crtc_state->shared_dpll &&
		crtc_state->port_clock == 0;
}

static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default.
*/ 17560 17561 connector->base.dpms = DRM_MODE_DPMS_OFF; 17562 connector->base.encoder = NULL; 17563 } 17564 17565 /* notify opregion of the sanitized encoder state */ 17566 intel_opregion_notify_encoder(encoder, connector && has_active_crtc); 17567 17568 if (INTEL_GEN(dev_priv) >= 11) 17569 icl_sanitize_encoder_pll_mapping(encoder); 17570 } 17571 17572 /* FIXME read out full plane state for all planes */ 17573 static void readout_plane_state(struct drm_i915_private *dev_priv) 17574 { 17575 struct intel_plane *plane; 17576 struct intel_crtc *crtc; 17577 17578 for_each_intel_plane(&dev_priv->drm, plane) { 17579 struct intel_plane_state *plane_state = 17580 to_intel_plane_state(plane->base.state); 17581 struct intel_crtc_state *crtc_state; 17582 enum pipe pipe = PIPE_A; 17583 bool visible; 17584 17585 visible = plane->get_hw_state(plane, &pipe); 17586 17587 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17588 crtc_state = to_intel_crtc_state(crtc->base.state); 17589 17590 intel_set_plane_visible(crtc_state, plane_state, visible); 17591 17592 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n", 17593 plane->base.base.id, plane->base.name, 17594 enableddisabled(visible), pipe_name(pipe)); 17595 } 17596 17597 for_each_intel_crtc(&dev_priv->drm, crtc) { 17598 struct intel_crtc_state *crtc_state = 17599 to_intel_crtc_state(crtc->base.state); 17600 17601 fixup_active_planes(crtc_state); 17602 } 17603 } 17604 17605 static void intel_modeset_readout_hw_state(struct drm_device *dev) 17606 { 17607 struct drm_i915_private *dev_priv = to_i915(dev); 17608 enum pipe pipe; 17609 struct intel_crtc *crtc; 17610 struct intel_encoder *encoder; 17611 struct intel_connector *connector; 17612 struct drm_connector_list_iter conn_iter; 17613 int i; 17614 17615 dev_priv->active_pipes = 0; 17616 17617 for_each_intel_crtc(dev, crtc) { 17618 struct intel_crtc_state *crtc_state = 17619 to_intel_crtc_state(crtc->base.state); 17620 17621 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); 17622 intel_crtc_free_hw_state(crtc_state); 17623 intel_crtc_state_reset(crtc_state, crtc); 17624 17625 crtc_state->hw.active = crtc_state->hw.enable = 17626 dev_priv->display.get_pipe_config(crtc, crtc_state); 17627 17628 crtc->base.enabled = crtc_state->hw.enable; 17629 crtc->active = crtc_state->hw.active; 17630 17631 if (crtc_state->hw.active) 17632 dev_priv->active_pipes |= BIT(crtc->pipe); 17633 17634 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", 17635 crtc->base.base.id, crtc->base.name, 17636 enableddisabled(crtc_state->hw.active)); 17637 } 17638 17639 readout_plane_state(dev_priv); 17640 17641 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 17642 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 17643 17644 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll, 17645 &pll->state.hw_state); 17646 17647 if (IS_ELKHARTLAKE(dev_priv) && pll->on && 17648 pll->info->id == DPLL_ID_EHL_DPLL4) { 17649 pll->wakeref = intel_display_power_get(dev_priv, 17650 POWER_DOMAIN_DPLL_DC_OFF); 17651 } 17652 17653 pll->state.crtc_mask = 0; 17654 for_each_intel_crtc(dev, crtc) { 17655 struct intel_crtc_state *crtc_state = 17656 to_intel_crtc_state(crtc->base.state); 17657 17658 if (crtc_state->hw.active && 17659 crtc_state->shared_dpll == pll) 17660 pll->state.crtc_mask |= 1 << crtc->pipe; 17661 } 17662 pll->active_mask = pll->state.crtc_mask; 17663 17664 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 17665 pll->info->name, pll->state.crtc_mask, pll->on); 17666 } 17667 17668 
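	/*
	 * Bind each active encoder to the pipe reported by its
	 * get_hw_state() hook and let its get_config() fill the crtc state.
	 */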
for_each_intel_encoder(dev, encoder) { 17669 pipe = 0; 17670 17671 if (encoder->get_hw_state(encoder, &pipe)) { 17672 struct intel_crtc_state *crtc_state; 17673 17674 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17675 crtc_state = to_intel_crtc_state(crtc->base.state); 17676 17677 encoder->base.crtc = &crtc->base; 17678 encoder->get_config(encoder, crtc_state); 17679 } else { 17680 encoder->base.crtc = NULL; 17681 } 17682 17683 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 17684 encoder->base.base.id, encoder->base.name, 17685 enableddisabled(encoder->base.crtc), 17686 pipe_name(pipe)); 17687 } 17688 17689 drm_connector_list_iter_begin(dev, &conn_iter); 17690 for_each_intel_connector_iter(connector, &conn_iter) { 17691 if (connector->get_hw_state(connector)) { 17692 struct intel_crtc_state *crtc_state; 17693 struct intel_crtc *crtc; 17694 17695 connector->base.dpms = DRM_MODE_DPMS_ON; 17696 17697 encoder = connector->encoder; 17698 connector->base.encoder = &encoder->base; 17699 17700 crtc = to_intel_crtc(encoder->base.crtc); 17701 crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL; 17702 17703 if (crtc_state && crtc_state->hw.active) { 17704 /* 17705 * This has to be done during hardware readout 17706 * because anything calling .crtc_disable may 17707 * rely on the connector_mask being accurate. 17708 */ 17709 crtc_state->uapi.connector_mask |= 17710 drm_connector_mask(&connector->base); 17711 crtc_state->uapi.encoder_mask |= 17712 drm_encoder_mask(&encoder->base); 17713 } 17714 } else { 17715 connector->base.dpms = DRM_MODE_DPMS_OFF; 17716 connector->base.encoder = NULL; 17717 } 17718 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 17719 connector->base.base.id, connector->base.name, 17720 enableddisabled(connector->base.encoder)); 17721 } 17722 drm_connector_list_iter_end(&conn_iter); 17723 17724 for_each_intel_crtc(dev, crtc) { 17725 struct intel_bw_state *bw_state = 17726 to_intel_bw_state(dev_priv->bw_obj.state); 17727 struct intel_crtc_state *crtc_state = 17728 to_intel_crtc_state(crtc->base.state); 17729 struct intel_plane *plane; 17730 int min_cdclk = 0; 17731 17732 if (crtc_state->hw.active) { 17733 struct drm_display_mode *mode = &crtc_state->hw.mode; 17734 17735 intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode, 17736 crtc_state); 17737 17738 *mode = crtc_state->hw.adjusted_mode; 17739 mode->hdisplay = crtc_state->pipe_src_w; 17740 mode->vdisplay = crtc_state->pipe_src_h; 17741 17742 /* 17743 * The initial mode needs to be set in order to keep 17744 * the atomic core happy. It wants a valid mode if the 17745 * crtc's enabled, so we do the above call. 17746 * 17747 * But we don't set all the derived state fully, hence 17748 * set a flag to indicate that a full recalculation is 17749 * needed on the next commit. 
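	 * (I915_MODE_FLAG_INHERITED in mode->private_flags below is
	 * that flag)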
17750 */ 17751 mode->private_flags = I915_MODE_FLAG_INHERITED; 17752 17753 intel_crtc_compute_pixel_rate(crtc_state); 17754 17755 intel_crtc_update_active_timings(crtc_state); 17756 17757 intel_crtc_copy_hw_to_uapi_state(crtc_state); 17758 } 17759 17760 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 17761 const struct intel_plane_state *plane_state = 17762 to_intel_plane_state(plane->base.state); 17763 17764 /* 17765 * FIXME don't have the fb yet, so can't 17766 * use intel_plane_data_rate() :( 17767 */ 17768 if (plane_state->uapi.visible) 17769 crtc_state->data_rate[plane->id] = 17770 4 * crtc_state->pixel_rate; 17771 /* 17772 * FIXME don't have the fb yet, so can't 17773 * use plane->min_cdclk() :( 17774 */ 17775 if (plane_state->uapi.visible && plane->min_cdclk) { 17776 if (crtc_state->double_wide || 17777 INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 17778 crtc_state->min_cdclk[plane->id] = 17779 DIV_ROUND_UP(crtc_state->pixel_rate, 2); 17780 else 17781 crtc_state->min_cdclk[plane->id] = 17782 crtc_state->pixel_rate; 17783 } 17784 DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n", 17785 plane->base.base.id, plane->base.name, 17786 crtc_state->min_cdclk[plane->id]); 17787 } 17788 17789 if (crtc_state->hw.active) { 17790 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); 17791 if (WARN_ON(min_cdclk < 0)) 17792 min_cdclk = 0; 17793 } 17794 17795 dev_priv->min_cdclk[crtc->pipe] = min_cdclk; 17796 dev_priv->min_voltage_level[crtc->pipe] = 17797 crtc_state->min_voltage_level; 17798 17799 intel_bw_crtc_update(bw_state, crtc_state); 17800 17801 intel_pipe_config_sanity_check(dev_priv, crtc_state); 17802 } 17803 } 17804 17805 static void 17806 get_encoder_power_domains(struct drm_i915_private *dev_priv) 17807 { 17808 struct intel_encoder *encoder; 17809 17810 for_each_intel_encoder(&dev_priv->drm, encoder) { 17811 struct intel_crtc_state *crtc_state; 17812 17813 if (!encoder->get_power_domains) 17814 continue; 17815 17816 /* 17817 * MST-primary and inactive encoders don't have a crtc state 17818 * and neither of these require any power domain references. 17819 */ 17820 if (!encoder->base.crtc) 17821 continue; 17822 17823 crtc_state = to_intel_crtc_state(encoder->base.crtc->state); 17824 encoder->get_power_domains(encoder, crtc_state); 17825 } 17826 } 17827 17828 static void intel_early_display_was(struct drm_i915_private *dev_priv) 17829 { 17830 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */ 17831 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) 17832 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | 17833 DARBF_GATING_DIS); 17834 17835 if (IS_HASWELL(dev_priv)) { 17836 /* 17837 * WaRsPkgCStateDisplayPMReq:hsw 17838 * System hang if this isn't done before disabling all planes! 
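	 * Hence this workaround lives in intel_early_display_was(),
	 * which runs before any of the sanitization touches the planes.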
 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}

static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
				       enum port port, i915_reg_t hdmi_reg)
{
	u32 val = I915_READ(hdmi_reg);

	if (val & SDVO_ENABLE ||
	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
		      port_name(port));

	val &= ~SDVO_PIPE_SEL_MASK;
	val |= SDVO_PIPE_SEL(PIPE_A);

	I915_WRITE(hdmi_reg, val);
}

static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
				     enum port port, i915_reg_t dp_reg)
{
	u32 val = I915_READ(dp_reg);

	if (val & DP_PORT_EN ||
	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
		      port_name(port));

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(PIPE_A);

	I915_WRITE(dp_reg, val);
}

static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}

/*
 * Scan out the current hw modeset state and
 * sanitize it to match the current state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
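	 * drm_crtc_vblank_reset() below puts the vblank logic into a
	 * known-off state first; active pipes then re-enable it via
	 * intel_crtc_vblank_on().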
17942 */ 17943 for_each_intel_crtc(&dev_priv->drm, crtc) { 17944 struct intel_crtc_state *crtc_state = 17945 to_intel_crtc_state(crtc->base.state); 17946 17947 drm_crtc_vblank_reset(&crtc->base); 17948 17949 if (crtc_state->hw.active) 17950 intel_crtc_vblank_on(crtc_state); 17951 } 17952 17953 intel_sanitize_plane_mapping(dev_priv); 17954 17955 for_each_intel_encoder(dev, encoder) 17956 intel_sanitize_encoder(encoder); 17957 17958 for_each_intel_crtc(&dev_priv->drm, crtc) { 17959 struct intel_crtc_state *crtc_state = 17960 to_intel_crtc_state(crtc->base.state); 17961 17962 intel_sanitize_crtc(crtc, ctx); 17963 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]"); 17964 } 17965 17966 intel_modeset_update_connector_atomic_state(dev); 17967 17968 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 17969 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 17970 17971 if (!pll->on || pll->active_mask) 17972 continue; 17973 17974 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", 17975 pll->info->name); 17976 17977 pll->info->funcs->disable(dev_priv, pll); 17978 pll->on = false; 17979 } 17980 17981 if (IS_G4X(dev_priv)) { 17982 g4x_wm_get_hw_state(dev_priv); 17983 g4x_wm_sanitize(dev_priv); 17984 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 17985 vlv_wm_get_hw_state(dev_priv); 17986 vlv_wm_sanitize(dev_priv); 17987 } else if (INTEL_GEN(dev_priv) >= 9) { 17988 skl_wm_get_hw_state(dev_priv); 17989 } else if (HAS_PCH_SPLIT(dev_priv)) { 17990 ilk_wm_get_hw_state(dev_priv); 17991 } 17992 17993 for_each_intel_crtc(dev, crtc) { 17994 struct intel_crtc_state *crtc_state = 17995 to_intel_crtc_state(crtc->base.state); 17996 u64 put_domains; 17997 17998 put_domains = modeset_get_crtc_power_domains(crtc_state); 17999 if (WARN_ON(put_domains)) 18000 modeset_put_power_domains(dev_priv, put_domains); 18001 } 18002 18003 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); 18004 } 18005 18006 void intel_display_resume(struct drm_device *dev) 18007 { 18008 struct drm_i915_private *dev_priv = to_i915(dev); 18009 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 18010 struct drm_modeset_acquire_ctx ctx; 18011 int ret; 18012 18013 dev_priv->modeset_restore_state = NULL; 18014 if (state) 18015 state->acquire_ctx = &ctx; 18016 18017 drm_modeset_acquire_init(&ctx, 0); 18018 18019 while (1) { 18020 ret = drm_modeset_lock_all_ctx(dev, &ctx); 18021 if (ret != -EDEADLK) 18022 break; 18023 18024 drm_modeset_backoff(&ctx); 18025 } 18026 18027 if (!ret) 18028 ret = __intel_display_resume(dev, state, &ctx); 18029 18030 intel_enable_ipc(dev_priv); 18031 drm_modeset_drop_locks(&ctx); 18032 drm_modeset_acquire_fini(&ctx); 18033 18034 if (ret) 18035 DRM_ERROR("Restoring old state failed with %i\n", ret); 18036 if (state) 18037 drm_atomic_state_put(state); 18038 } 18039 18040 static void intel_hpd_poll_fini(struct drm_i915_private *i915) 18041 { 18042 struct intel_connector *connector; 18043 struct drm_connector_list_iter conn_iter; 18044 18045 /* Kill all the work that may have been queued by hpd. 
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Disable interrupts and polling first to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * otherwise run into fancy races.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup().
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

struct intel_display_error_state {

	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
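/*
 * Editor's illustrative sketch (not part of i915): the capture function
 * below can be reached from the GPU error handling path, which may run
 * in atomic context, so it snapshots registers behind a GFP_ATOMIC
 * allocation and simply returns NULL when memory is unavailable rather
 * than sleeping. The same allocation pattern in isolation
 * (struct example_snapshot and example_capture() are hypothetical):
 */
struct example_snapshot {
	u32 regs[8];
};

static inline struct example_snapshot *example_capture(void)
{
	/* GFP_ATOMIC: never sleep here, fail the capture instead */
	return kzalloc(sizeof(struct example_snapshot), GFP_ATOMIC);
}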
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
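/*
 * Editor's illustrative sketch (not part of i915): how the capture
 * function above and the print function below pair up. In the real
 * driver the hookup lives in the GPU error-state code and the
 * prototypes come from the driver headers; this hypothetical caller
 * just shows the ownership rules - the snapshot is allocated by
 * capture, may be NULL, and is kfree'd by whoever requested it.
 */
static inline void
example_dump_display_error(struct drm_i915_error_state_buf *m,
			   struct drm_i915_private *i915)
{
	struct intel_display_error_state *error;

	error = intel_display_capture_error_state(i915);
	if (!error)
		return;

	intel_display_print_error_state(m, error);
	kfree(error);
}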
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

#endif