/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
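/*
 * Worked example for the two helpers above (illustrative numbers, not
 * from any particular SKU): a CCK_FUSE_REG reading of 2 selects
 * vco_freq[2] = 2000, i.e. a 2 GHz HPLL (2000000 kHz). A CCK divider
 * field of 4 then gives DIV_ROUND_CLOSEST(2000000 << 1, 4 + 1) =
 * 800000 kHz; the divider field effectively encodes 2 * ref / clock - 1.
 */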
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100MHz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
		crtc_state->sync_mode_slaves_mask);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

/*
 * Platform specific helpers to calculate the port PLL loopback (clock.m)
 * and post-divider (clock.p) values, as well as the pre-divided (clock.vco)
 * and post-divided fast (clock.dot) clock rates. The fast dot clock is fed
 * to the port's IO logic. Each helper returns the rate of the clock that is
 * fed to the display engine's pipe, which can be the fast dot clock rate
 * itself or a divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
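/*
 * Worked example for i9xx_calc_dpll_params() (illustrative values that
 * happen to satisfy intel_limits_i9xx_sdvo): with refclk = 96000 kHz,
 * m1 = 12, m2 = 6, n = 2, p1 = 2, p2 = 10:
 *   m   = 5 * (12 + 2) + (6 + 2) = 78
 *   vco = 96000 * 78 / (2 + 2)   = 1872000 kHz
 *   dot = 1872000 / (2 * 10)     = 93600 kHz
 */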
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}
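/*
 * E.g. with intel_limits_i9xx_sdvo (dot_limit = 200000, p2_slow = 10,
 * p2_fast = 5), a non-LVDS target of 150000 kHz selects p2 = 10 and a
 * target of 270000 kHz selects p2 = 5, while LVDS follows the current
 * single/dual channel configuration instead of the dot limit.
 */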
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (m2 + 2) / n / p1 / p2 (Pineview has a single combined m
 * divider and no +2 on n; see pnv_calc_dpll_params()).
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
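/*
 * The error budget used by g4x_find_best_dpll() above,
 * (target >> 8) + (target >> 9), is target * (1/256 + 1/512), i.e.
 * roughly 0.586% of the target: for a 270000 kHz target that is
 * 1054 + 527 = 1581 kHz of allowed deviation.
 */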
/*
 * Check whether the calculated PLL configuration is more optimal compared to
 * the best configuration and error found so far. Returns true if it is, and
 * stores the configuration's error in *error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
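/*
 * Example of the ppm computation above (illustrative numbers): a target
 * of 1350000 kHz (a 270000 kHz pixel clock times the 5x fast-clock
 * factor) and a computed dot of 1350135 kHz give
 * error_ppm = 1000000 * 135 / 1350000 = 100, which is just outside the
 * "< 100 ppm" window in which a bigger P is preferred over a smaller
 * error.
 */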
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (m1 * m2) / n / p1 / p2, which is the 5x fast clock; the pipe
 * pixel clock is that value divided by 5 (see vlv_calc_dpll_params()).
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (m1 * m2) / n / p1 / p2 with m2 in 22.22 fixed point (i.e. an
 * extra division by 1 << 22); as on vlv this is the 5x fast clock.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 always
	 * to 2. If we ever need to support a 200 MHz refclk we need to
	 * revisit this because n may no longer be 1.
	 */
	clock.n = 1;
	clock.m1 = 2;

	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}
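/*
 * Note that chv m2 values are in 22.22 fixed point throughout: the
 * intel_limits_chv/bxt tables shift their m2 bounds by 22 and
 * chv_calc_dpll_params() divides by n << 22. Illustrative example:
 * refclk = 19200 kHz, n = 1, m1 = 2, m2 = 150 << 22 gives
 * vco = 19200 * 2 * 150 = 5760000 kHz, inside the CHV VCO range above.
 */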
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
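/*
 * The 5 ms between the two PIPEDSL samples above sits between the
 * scanline and frame periods (e.g. ~15 us per line and ~16.7 ms per
 * frame at 1080p60), so a running pipe is all but guaranteed to show
 * two different line counter values.
 */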
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/*
		 * The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
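/*
 * The PORT_B and PORT_C ready bits above live in the same DPLL(0)
 * status register, with the PORT_C bits 4 positions above the PORT_B
 * ones; the expected_mask <<= 4 for PORT_C aligns the caller's mask
 * with that layout.
 */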
(HAS_PCH_CPT(dev_priv)) { 1650 reg = TRANS_CHICKEN2(pipe); 1651 val = I915_READ(reg); 1652 /* 1653 * Workaround: Set the timing override bit 1654 * before enabling the pch transcoder. 1655 */ 1656 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1657 /* Configure frame start delay to match the CPU */ 1658 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 1659 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0); 1660 I915_WRITE(reg, val); 1661 } 1662 1663 reg = PCH_TRANSCONF(pipe); 1664 val = I915_READ(reg); 1665 pipeconf_val = I915_READ(PIPECONF(pipe)); 1666 1667 if (HAS_PCH_IBX(dev_priv)) { 1668 /* Configure frame start delay to match the CPU */ 1669 val &= ~TRANS_FRAME_START_DELAY_MASK; 1670 val |= TRANS_FRAME_START_DELAY(0); 1671 1672 /* 1673 * Make the BPC in transcoder be consistent with 1674 * that in pipeconf reg. For HDMI we must use 8bpc 1675 * here for both 8bpc and 12bpc. 1676 */ 1677 val &= ~PIPECONF_BPC_MASK; 1678 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 1679 val |= PIPECONF_8BPC; 1680 else 1681 val |= pipeconf_val & PIPECONF_BPC_MASK; 1682 } 1683 1684 val &= ~TRANS_INTERLACE_MASK; 1685 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { 1686 if (HAS_PCH_IBX(dev_priv) && 1687 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 1688 val |= TRANS_LEGACY_INTERLACED_ILK; 1689 else 1690 val |= TRANS_INTERLACED; 1691 } else { 1692 val |= TRANS_PROGRESSIVE; 1693 } 1694 1695 I915_WRITE(reg, val | TRANS_ENABLE); 1696 if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) 1697 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 1698 } 1699 1700 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1701 enum transcoder cpu_transcoder) 1702 { 1703 u32 val, pipeconf_val; 1704 1705 /* FDI must be feeding us bits for PCH ports */ 1706 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); 1707 assert_fdi_rx_enabled(dev_priv, PIPE_A); 1708 1709 val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 1710 /* Workaround: set timing override bit. */ 1711 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1712 /* Configure frame start delay to match the CPU */ 1713 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 1714 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0); 1715 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 1716 1717 val = TRANS_ENABLE; 1718 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 1719 1720 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 1721 PIPECONF_INTERLACED_ILK) 1722 val |= TRANS_INTERLACED; 1723 else 1724 val |= TRANS_PROGRESSIVE; 1725 1726 I915_WRITE(LPT_TRANSCONF, val); 1727 if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, 1728 TRANS_STATE_ENABLE, 100)) 1729 DRM_ERROR("Failed to enable PCH transcoder\n"); 1730 } 1731 1732 static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv, 1733 enum pipe pipe) 1734 { 1735 i915_reg_t reg; 1736 u32 val; 1737 1738 /* FDI relies on the transcoder */ 1739 assert_fdi_tx_disabled(dev_priv, pipe); 1740 assert_fdi_rx_disabled(dev_priv, pipe); 1741 1742 /* Ports must be off as well */ 1743 assert_pch_ports_disabled(dev_priv, pipe); 1744 1745 reg = PCH_TRANSCONF(pipe); 1746 val = I915_READ(reg); 1747 val &= ~TRANS_ENABLE; 1748 I915_WRITE(reg, val); 1749 /* wait for PCH transcoder off, transcoder state */ 1750 if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) 1751 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 1752 1753 if (HAS_PCH_CPT(dev_priv)) { 1754 /* Workaround: Clear the timing override chicken bit again. 
*/ 1755 reg = TRANS_CHICKEN2(pipe); 1756 val = I915_READ(reg); 1757 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1758 I915_WRITE(reg, val); 1759 } 1760 } 1761 1762 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 1763 { 1764 u32 val; 1765 1766 val = I915_READ(LPT_TRANSCONF); 1767 val &= ~TRANS_ENABLE; 1768 I915_WRITE(LPT_TRANSCONF, val); 1769 /* wait for PCH transcoder off, transcoder state */ 1770 if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, 1771 TRANS_STATE_ENABLE, 50)) 1772 DRM_ERROR("Failed to disable PCH transcoder\n"); 1773 1774 /* Workaround: clear timing override bit. */ 1775 val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 1776 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1777 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 1778 } 1779 1780 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) 1781 { 1782 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1783 1784 if (HAS_PCH_LPT(dev_priv)) 1785 return PIPE_A; 1786 else 1787 return crtc->pipe; 1788 } 1789 1790 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state) 1791 { 1792 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1793 1794 /* 1795 * On i965gm the hardware frame counter reads 1796 * zero when the TV encoder is enabled :( 1797 */ 1798 if (IS_I965GM(dev_priv) && 1799 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT))) 1800 return 0; 1801 1802 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1803 return 0xffffffff; /* full 32 bit counter */ 1804 else if (INTEL_GEN(dev_priv) >= 3) 1805 return 0xffffff; /* only 24 bits of frame count */ 1806 else 1807 return 0; /* Gen2 doesn't have a hardware frame counter */ 1808 } 1809 1810 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state) 1811 { 1812 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1813 1814 assert_vblank_disabled(&crtc->base); 1815 drm_crtc_set_max_vblank_count(&crtc->base, 1816 intel_crtc_max_vblank_count(crtc_state)); 1817 drm_crtc_vblank_on(&crtc->base); 1818 } 1819 1820 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state) 1821 { 1822 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1823 1824 drm_crtc_vblank_off(&crtc->base); 1825 assert_vblank_disabled(&crtc->base); 1826 } 1827 1828 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) 1829 { 1830 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 1831 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1832 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 1833 enum pipe pipe = crtc->pipe; 1834 i915_reg_t reg; 1835 u32 val; 1836 1837 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); 1838 1839 assert_planes_disabled(crtc); 1840 1841 /* 1842 * A pipe without a PLL won't actually be able to drive bits from 1843 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1844 * need the check. 
1845 */ 1846 if (HAS_GMCH(dev_priv)) { 1847 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) 1848 assert_dsi_pll_enabled(dev_priv); 1849 else 1850 assert_pll_enabled(dev_priv, pipe); 1851 } else { 1852 if (new_crtc_state->has_pch_encoder) { 1853 /* if driving the PCH, we need FDI enabled */ 1854 assert_fdi_rx_pll_enabled(dev_priv, 1855 intel_crtc_pch_transcoder(crtc)); 1856 assert_fdi_tx_pll_enabled(dev_priv, 1857 (enum pipe) cpu_transcoder); 1858 } 1859 /* FIXME: assert CPU port conditions for SNB+ */ 1860 } 1861 1862 trace_intel_pipe_enable(crtc); 1863 1864 reg = PIPECONF(cpu_transcoder); 1865 val = I915_READ(reg); 1866 if (val & PIPECONF_ENABLE) { 1867 /* we keep both pipes enabled on 830 */ 1868 WARN_ON(!IS_I830(dev_priv)); 1869 return; 1870 } 1871 1872 I915_WRITE(reg, val | PIPECONF_ENABLE); 1873 POSTING_READ(reg); 1874 1875 /* 1876 * Until the pipe starts PIPEDSL reads will return a stale value, 1877 * which causes an apparent vblank timestamp jump when PIPEDSL 1878 * resets to its proper value. That also messes up the frame count 1879 * when it's derived from the timestamps. So let's wait for the 1880 * pipe to start properly before we call drm_crtc_vblank_on() 1881 */ 1882 if (intel_crtc_max_vblank_count(new_crtc_state) == 0) 1883 intel_wait_for_pipe_scanline_moving(crtc); 1884 } 1885 1886 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) 1887 { 1888 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 1889 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1890 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 1891 enum pipe pipe = crtc->pipe; 1892 i915_reg_t reg; 1893 u32 val; 1894 1895 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 1896 1897 /* 1898 * Make sure planes won't keep trying to pump pixels to us, 1899 * or we might hang the display. 1900 */ 1901 assert_planes_disabled(crtc); 1902 1903 trace_intel_pipe_disable(crtc); 1904 1905 reg = PIPECONF(cpu_transcoder); 1906 val = I915_READ(reg); 1907 if ((val & PIPECONF_ENABLE) == 0) 1908 return; 1909 1910 /* 1911 * Double wide has implications for planes 1912 * so best keep it disabled when not needed. 1913 */ 1914 if (old_crtc_state->double_wide) 1915 val &= ~PIPECONF_DOUBLE_WIDE; 1916 1917 /* Don't disable pipe or pipe PLLs if needed */ 1918 if (!IS_I830(dev_priv)) 1919 val &= ~PIPECONF_ENABLE; 1920 1921 I915_WRITE(reg, val); 1922 if ((val & PIPECONF_ENABLE) == 0) 1923 intel_wait_for_pipe_off(old_crtc_state); 1924 } 1925 1926 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 1927 { 1928 return IS_GEN(dev_priv, 2) ? 
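		/* gen2 uses 2KiB GTT tiles, everything else 4KiB */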
2048 : 4096; 1929 } 1930 1931 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) 1932 { 1933 if (!is_ccs_modifier(fb->modifier)) 1934 return false; 1935 1936 return plane >= fb->format->num_planes / 2; 1937 } 1938 1939 static bool is_gen12_ccs_modifier(u64 modifier) 1940 { 1941 return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || 1942 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; 1943 1944 } 1945 1946 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) 1947 { 1948 return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane); 1949 } 1950 1951 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane) 1952 { 1953 if (is_ccs_modifier(fb->modifier)) 1954 return is_ccs_plane(fb, plane); 1955 1956 return plane == 1; 1957 } 1958 1959 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) 1960 { 1961 WARN_ON(!is_ccs_modifier(fb->modifier) || 1962 (main_plane && main_plane >= fb->format->num_planes / 2)); 1963 1964 return fb->format->num_planes / 2 + main_plane; 1965 } 1966 1967 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) 1968 { 1969 WARN_ON(!is_ccs_modifier(fb->modifier) || 1970 ccs_plane < fb->format->num_planes / 2); 1971 1972 return ccs_plane - fb->format->num_planes / 2; 1973 } 1974 1975 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */ 1976 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) 1977 { 1978 if (is_ccs_modifier(fb->modifier)) 1979 return main_to_ccs_plane(fb, main_plane); 1980 1981 return 1; 1982 } 1983 1984 bool 1985 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, 1986 uint64_t modifier) 1987 { 1988 return info->is_yuv && 1989 info->num_planes == (is_ccs_modifier(modifier) ? 
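			/* 2 color planes, doubled by their per-plane CCS planes */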
4 : 2); 1990 } 1991 1992 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, 1993 int color_plane) 1994 { 1995 return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && 1996 color_plane == 1; 1997 } 1998 1999 static unsigned int 2000 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) 2001 { 2002 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2003 unsigned int cpp = fb->format->cpp[color_plane]; 2004 2005 switch (fb->modifier) { 2006 case DRM_FORMAT_MOD_LINEAR: 2007 return intel_tile_size(dev_priv); 2008 case I915_FORMAT_MOD_X_TILED: 2009 if (IS_GEN(dev_priv, 2)) 2010 return 128; 2011 else 2012 return 512; 2013 case I915_FORMAT_MOD_Y_TILED_CCS: 2014 if (is_ccs_plane(fb, color_plane)) 2015 return 128; 2016 /* fall through */ 2017 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2018 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 2019 if (is_ccs_plane(fb, color_plane)) 2020 return 64; 2021 /* fall through */ 2022 case I915_FORMAT_MOD_Y_TILED: 2023 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv)) 2024 return 128; 2025 else 2026 return 512; 2027 case I915_FORMAT_MOD_Yf_TILED_CCS: 2028 if (is_ccs_plane(fb, color_plane)) 2029 return 128; 2030 /* fall through */ 2031 case I915_FORMAT_MOD_Yf_TILED: 2032 switch (cpp) { 2033 case 1: 2034 return 64; 2035 case 2: 2036 case 4: 2037 return 128; 2038 case 8: 2039 case 16: 2040 return 256; 2041 default: 2042 MISSING_CASE(cpp); 2043 return cpp; 2044 } 2045 break; 2046 default: 2047 MISSING_CASE(fb->modifier); 2048 return cpp; 2049 } 2050 } 2051 2052 static unsigned int 2053 intel_tile_height(const struct drm_framebuffer *fb, int color_plane) 2054 { 2055 if (is_gen12_ccs_plane(fb, color_plane)) 2056 return 1; 2057 2058 return intel_tile_size(to_i915(fb->dev)) / 2059 intel_tile_width_bytes(fb, color_plane); 2060 } 2061 2062 /* Return the tile dimensions in pixel units */ 2063 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, 2064 unsigned int *tile_width, 2065 unsigned int *tile_height) 2066 { 2067 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane); 2068 unsigned int cpp = fb->format->cpp[color_plane]; 2069 2070 *tile_width = tile_width_bytes / cpp; 2071 *tile_height = intel_tile_height(fb, color_plane); 2072 } 2073 2074 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, 2075 int color_plane) 2076 { 2077 unsigned int tile_width, tile_height; 2078 2079 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2080 2081 return fb->pitches[color_plane] * tile_height; 2082 } 2083 2084 unsigned int 2085 intel_fb_align_height(const struct drm_framebuffer *fb, 2086 int color_plane, unsigned int height) 2087 { 2088 unsigned int tile_height = intel_tile_height(fb, color_plane); 2089 2090 return ALIGN(height, tile_height); 2091 } 2092 2093 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 2094 { 2095 unsigned int size = 0; 2096 int i; 2097 2098 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 2099 size += rot_info->plane[i].width * rot_info->plane[i].height; 2100 2101 return size; 2102 } 2103 2104 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info) 2105 { 2106 unsigned int size = 0; 2107 int i; 2108 2109 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) 2110 size += rem_info->plane[i].width * rem_info->plane[i].height; 2111 2112 return size; 2113 } 2114 2115 static void 2116 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, 2117 const struct drm_framebuffer *fb, 2118 
unsigned int rotation) 2119 { 2120 view->type = I915_GGTT_VIEW_NORMAL; 2121 if (drm_rotation_90_or_270(rotation)) { 2122 view->type = I915_GGTT_VIEW_ROTATED; 2123 view->rotated = to_intel_framebuffer(fb)->rot_info; 2124 } 2125 } 2126 2127 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv) 2128 { 2129 if (IS_I830(dev_priv)) 2130 return 16 * 1024; 2131 else if (IS_I85X(dev_priv)) 2132 return 256; 2133 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 2134 return 32; 2135 else 2136 return 4 * 1024; 2137 } 2138 2139 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2140 { 2141 if (INTEL_GEN(dev_priv) >= 9) 2142 return 256 * 1024; 2143 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || 2144 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2145 return 128 * 1024; 2146 else if (INTEL_GEN(dev_priv) >= 4) 2147 return 4 * 1024; 2148 else 2149 return 0; 2150 } 2151 2152 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, 2153 int color_plane) 2154 { 2155 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2156 2157 /* AUX_DIST needs only 4K alignment */ 2158 if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) || 2159 is_ccs_plane(fb, color_plane)) 2160 return 4096; 2161 2162 switch (fb->modifier) { 2163 case DRM_FORMAT_MOD_LINEAR: 2164 return intel_linear_alignment(dev_priv); 2165 case I915_FORMAT_MOD_X_TILED: 2166 if (INTEL_GEN(dev_priv) >= 9) 2167 return 256 * 1024; 2168 return 0; 2169 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 2170 if (is_semiplanar_uv_plane(fb, color_plane)) 2171 return intel_tile_row_size(fb, color_plane); 2172 /* Fall-through */ 2173 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2174 return 16 * 1024; 2175 case I915_FORMAT_MOD_Y_TILED_CCS: 2176 case I915_FORMAT_MOD_Yf_TILED_CCS: 2177 case I915_FORMAT_MOD_Y_TILED: 2178 if (INTEL_GEN(dev_priv) >= 12 && 2179 is_semiplanar_uv_plane(fb, color_plane)) 2180 return intel_tile_row_size(fb, color_plane); 2181 /* Fall-through */ 2182 case I915_FORMAT_MOD_Yf_TILED: 2183 return 1 * 1024 * 1024; 2184 default: 2185 MISSING_CASE(fb->modifier); 2186 return 0; 2187 } 2188 } 2189 2190 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) 2191 { 2192 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2193 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2194 2195 return INTEL_GEN(dev_priv) < 4 || 2196 (plane->has_fbc && 2197 plane_state->view.type == I915_GGTT_VIEW_NORMAL); 2198 } 2199 2200 struct i915_vma * 2201 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 2202 const struct i915_ggtt_view *view, 2203 bool uses_fence, 2204 unsigned long *out_flags) 2205 { 2206 struct drm_device *dev = fb->dev; 2207 struct drm_i915_private *dev_priv = to_i915(dev); 2208 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2209 intel_wakeref_t wakeref; 2210 struct i915_vma *vma; 2211 unsigned int pinctl; 2212 u32 alignment; 2213 2214 if (WARN_ON(!i915_gem_object_is_framebuffer(obj))) 2215 return ERR_PTR(-EINVAL); 2216 2217 alignment = intel_surf_alignment(fb, 0); 2218 if (WARN_ON(alignment && !is_power_of_2(alignment))) 2219 return ERR_PTR(-EINVAL); 2220 2221 /* Note that the w/a also requires 64 PTE of padding following the 2222 * bo. We currently fill all unused PTE with the shadow page and so 2223 * we should always have valid PTE following the scanout preventing 2224 * the VT-d warning. 
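	 * (64 PTEs of 4KiB pages amount to 256KiB, matching the minimum
	 * alignment we enforce below.)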
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Let's presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression. For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}

void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}

static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
			  unsigned int rotation)
{
	if (drm_rotation_90_or_270(rotation))
		return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
	else
		return fb->pitches[color_plane];
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
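 *
 * Example (hypothetical numbers): XRGB8888 (cpp = 4) with a 4096 byte
 * stride puts pixel (16, 2) at 2 * 4096 + 16 * 4 = 8256 bytes from the
 * start of scanout.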
2320 */ 2321 u32 intel_fb_xy_to_linear(int x, int y, 2322 const struct intel_plane_state *state, 2323 int color_plane) 2324 { 2325 const struct drm_framebuffer *fb = state->hw.fb; 2326 unsigned int cpp = fb->format->cpp[color_plane]; 2327 unsigned int pitch = state->color_plane[color_plane].stride; 2328 2329 return y * pitch + x * cpp; 2330 } 2331 2332 /* 2333 * Add the x/y offsets derived from fb->offsets[] to the user 2334 * specified plane src x/y offsets. The resulting x/y offsets 2335 * specify the start of scanout from the beginning of the gtt mapping. 2336 */ 2337 void intel_add_fb_offsets(int *x, int *y, 2338 const struct intel_plane_state *state, 2339 int color_plane) 2340 2341 { 2342 *x += state->color_plane[color_plane].x; 2343 *y += state->color_plane[color_plane].y; 2344 } 2345 2346 static u32 intel_adjust_tile_offset(int *x, int *y, 2347 unsigned int tile_width, 2348 unsigned int tile_height, 2349 unsigned int tile_size, 2350 unsigned int pitch_tiles, 2351 u32 old_offset, 2352 u32 new_offset) 2353 { 2354 unsigned int pitch_pixels = pitch_tiles * tile_width; 2355 unsigned int tiles; 2356 2357 WARN_ON(old_offset & (tile_size - 1)); 2358 WARN_ON(new_offset & (tile_size - 1)); 2359 WARN_ON(new_offset > old_offset); 2360 2361 tiles = (old_offset - new_offset) / tile_size; 2362 2363 *y += tiles / pitch_tiles * tile_height; 2364 *x += tiles % pitch_tiles * tile_width; 2365 2366 /* minimize x in case it got needlessly big */ 2367 *y += *x / pitch_pixels * tile_height; 2368 *x %= pitch_pixels; 2369 2370 return new_offset; 2371 } 2372 2373 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane) 2374 { 2375 return fb->modifier == DRM_FORMAT_MOD_LINEAR || 2376 is_gen12_ccs_plane(fb, color_plane); 2377 } 2378 2379 static u32 intel_adjust_aligned_offset(int *x, int *y, 2380 const struct drm_framebuffer *fb, 2381 int color_plane, 2382 unsigned int rotation, 2383 unsigned int pitch, 2384 u32 old_offset, u32 new_offset) 2385 { 2386 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2387 unsigned int cpp = fb->format->cpp[color_plane]; 2388 2389 WARN_ON(new_offset > old_offset); 2390 2391 if (!is_surface_linear(fb, color_plane)) { 2392 unsigned int tile_size, tile_width, tile_height; 2393 unsigned int pitch_tiles; 2394 2395 tile_size = intel_tile_size(dev_priv); 2396 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2397 2398 if (drm_rotation_90_or_270(rotation)) { 2399 pitch_tiles = pitch / tile_height; 2400 swap(tile_width, tile_height); 2401 } else { 2402 pitch_tiles = pitch / (tile_width * cpp); 2403 } 2404 2405 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2406 tile_size, pitch_tiles, 2407 old_offset, new_offset); 2408 } else { 2409 old_offset += *y * pitch + *x * cpp; 2410 2411 *y = (old_offset - new_offset) / pitch; 2412 *x = ((old_offset - new_offset) - *y * pitch) / cpp; 2413 } 2414 2415 return new_offset; 2416 } 2417 2418 /* 2419 * Adjust the tile offset by moving the difference into 2420 * the x/y offsets. 2421 */ 2422 static u32 intel_plane_adjust_aligned_offset(int *x, int *y, 2423 const struct intel_plane_state *state, 2424 int color_plane, 2425 u32 old_offset, u32 new_offset) 2426 { 2427 return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane, 2428 state->hw.rotation, 2429 state->color_plane[color_plane].stride, 2430 old_offset, new_offset); 2431 } 2432 2433 /* 2434 * Computes the aligned offset to the base tile and adjusts 2435 * x, y. bytes per pixel is assumed to be a power-of-two. 
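 *
 * Example (hypothetical numbers): X-tiled with cpp = 4, i.e. 128x8
 * pixel tiles of 4KiB each. x = 300, y = 40 yields tile_rows = 5 and
 * tiles = 2, so offset = (5 * pitch_tiles + 2) * 4096, leaving
 * x = 44, y = 0 as the intra-tile offset.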
2436 * 2437 * In the 90/270 rotated case, x and y are assumed 2438 * to be already rotated to match the rotated GTT view, and 2439 * pitch is the tile_height aligned framebuffer height. 2440 * 2441 * This function is used when computing the derived information 2442 * under intel_framebuffer, so using any of that information 2443 * here is not allowed. Anything under drm_framebuffer can be 2444 * used. This is why the user has to pass in the pitch since it 2445 * is specified in the rotated orientation. 2446 */ 2447 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, 2448 int *x, int *y, 2449 const struct drm_framebuffer *fb, 2450 int color_plane, 2451 unsigned int pitch, 2452 unsigned int rotation, 2453 u32 alignment) 2454 { 2455 unsigned int cpp = fb->format->cpp[color_plane]; 2456 u32 offset, offset_aligned; 2457 2458 if (!is_surface_linear(fb, color_plane)) { 2459 unsigned int tile_size, tile_width, tile_height; 2460 unsigned int tile_rows, tiles, pitch_tiles; 2461 2462 tile_size = intel_tile_size(dev_priv); 2463 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2464 2465 if (drm_rotation_90_or_270(rotation)) { 2466 pitch_tiles = pitch / tile_height; 2467 swap(tile_width, tile_height); 2468 } else { 2469 pitch_tiles = pitch / (tile_width * cpp); 2470 } 2471 2472 tile_rows = *y / tile_height; 2473 *y %= tile_height; 2474 2475 tiles = *x / tile_width; 2476 *x %= tile_width; 2477 2478 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2479 2480 offset_aligned = offset; 2481 if (alignment) 2482 offset_aligned = rounddown(offset_aligned, alignment); 2483 2484 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2485 tile_size, pitch_tiles, 2486 offset, offset_aligned); 2487 } else { 2488 offset = *y * pitch + *x * cpp; 2489 offset_aligned = offset; 2490 if (alignment) { 2491 offset_aligned = rounddown(offset_aligned, alignment); 2492 *y = (offset % alignment) / pitch; 2493 *x = ((offset % alignment) - *y * pitch) / cpp; 2494 } else { 2495 *y = *x = 0; 2496 } 2497 } 2498 2499 return offset_aligned; 2500 } 2501 2502 static u32 intel_plane_compute_aligned_offset(int *x, int *y, 2503 const struct intel_plane_state *state, 2504 int color_plane) 2505 { 2506 struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane); 2507 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 2508 const struct drm_framebuffer *fb = state->hw.fb; 2509 unsigned int rotation = state->hw.rotation; 2510 int pitch = state->color_plane[color_plane].stride; 2511 u32 alignment; 2512 2513 if (intel_plane->id == PLANE_CURSOR) 2514 alignment = intel_cursor_alignment(dev_priv); 2515 else 2516 alignment = intel_surf_alignment(fb, color_plane); 2517 2518 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane, 2519 pitch, rotation, alignment); 2520 } 2521 2522 /* Convert the fb->offset[] into x/y offsets */ 2523 static int intel_fb_offset_to_xy(int *x, int *y, 2524 const struct drm_framebuffer *fb, 2525 int color_plane) 2526 { 2527 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2528 unsigned int height; 2529 u32 alignment; 2530 2531 if (INTEL_GEN(dev_priv) >= 12 && 2532 is_semiplanar_uv_plane(fb, color_plane)) 2533 alignment = intel_tile_row_size(fb, color_plane); 2534 else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) 2535 alignment = intel_tile_size(dev_priv); 2536 else 2537 alignment = 0; 2538 2539 if (alignment != 0 && fb->offsets[color_plane] % alignment) { 2540 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n", 2541 
			      fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
			      fb->offsets[color_plane], fb->pitches[color_plane],
			      color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}

static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case I915_FORMAT_MOD_X_TILED:
		return I915_TILING_X;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return I915_TILING_Y;
	default:
		return I915_TILING_NONE;
	}
}

/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 * the cache-line pairs. The compression state of the cache-line pair
 * is specified by 2 bits in the CCS. Each CCS cache-line represents
 * an area on the main surface of 16x16 sets of 128 byte Y-tiled
 * cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refer to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};

/*
 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
 * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
 * the main surface.
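 *
 * (Spelled out: 4 bits per cache line pair means 2 pairs per CCS byte,
 * so a 64B CCS cache line covers 128 pairs * 128B = 16KiB = 4 Y-tiles,
 * and a single CCS byte covers 2 * 128B = 256B = 64 main surface pixels.)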
2614 */ 2615 static const struct drm_format_info gen12_ccs_formats[] = { 2616 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 2617 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2618 .hsub = 1, .vsub = 1, }, 2619 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 2620 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2621 .hsub = 1, .vsub = 1, }, 2622 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 2623 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2624 .hsub = 1, .vsub = 1, .has_alpha = true }, 2625 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 2626 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2627 .hsub = 1, .vsub = 1, .has_alpha = true }, 2628 { .format = DRM_FORMAT_YUYV, .num_planes = 2, 2629 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2630 .hsub = 2, .vsub = 1, .is_yuv = true }, 2631 { .format = DRM_FORMAT_YVYU, .num_planes = 2, 2632 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2633 .hsub = 2, .vsub = 1, .is_yuv = true }, 2634 { .format = DRM_FORMAT_UYVY, .num_planes = 2, 2635 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2636 .hsub = 2, .vsub = 1, .is_yuv = true }, 2637 { .format = DRM_FORMAT_VYUY, .num_planes = 2, 2638 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2639 .hsub = 2, .vsub = 1, .is_yuv = true }, 2640 { .format = DRM_FORMAT_NV12, .num_planes = 4, 2641 .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, 2642 .hsub = 2, .vsub = 2, .is_yuv = true }, 2643 { .format = DRM_FORMAT_P010, .num_planes = 4, 2644 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 2645 .hsub = 2, .vsub = 2, .is_yuv = true }, 2646 { .format = DRM_FORMAT_P012, .num_planes = 4, 2647 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 2648 .hsub = 2, .vsub = 2, .is_yuv = true }, 2649 { .format = DRM_FORMAT_P016, .num_planes = 4, 2650 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 2651 .hsub = 2, .vsub = 2, .is_yuv = true }, 2652 }; 2653 2654 static const struct drm_format_info * 2655 lookup_format_info(const struct drm_format_info formats[], 2656 int num_formats, u32 format) 2657 { 2658 int i; 2659 2660 for (i = 0; i < num_formats; i++) { 2661 if (formats[i].format == format) 2662 return &formats[i]; 2663 } 2664 2665 return NULL; 2666 } 2667 2668 static const struct drm_format_info * 2669 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 2670 { 2671 switch (cmd->modifier[0]) { 2672 case I915_FORMAT_MOD_Y_TILED_CCS: 2673 case I915_FORMAT_MOD_Yf_TILED_CCS: 2674 return lookup_format_info(skl_ccs_formats, 2675 ARRAY_SIZE(skl_ccs_formats), 2676 cmd->pixel_format); 2677 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2678 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 2679 return lookup_format_info(gen12_ccs_formats, 2680 ARRAY_SIZE(gen12_ccs_formats), 2681 cmd->pixel_format); 2682 default: 2683 return NULL; 2684 } 2685 } 2686 2687 bool is_ccs_modifier(u64 modifier) 2688 { 2689 return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || 2690 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || 2691 modifier == I915_FORMAT_MOD_Y_TILED_CCS || 2692 modifier == I915_FORMAT_MOD_Yf_TILED_CCS; 2693 } 2694 2695 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane) 2696 { 2697 return 
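	/* each 64B CCS cache line covers 4 Y-tiles, i.e. 512B of main surface pitch */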
DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)], 2698 512) * 64; 2699 } 2700 2701 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, 2702 u32 pixel_format, u64 modifier) 2703 { 2704 struct intel_crtc *crtc; 2705 struct intel_plane *plane; 2706 2707 /* 2708 * We assume the primary plane for pipe A has 2709 * the highest stride limits of them all. 2710 */ 2711 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); 2712 if (!crtc) 2713 return 0; 2714 2715 plane = to_intel_plane(crtc->base.primary); 2716 2717 return plane->max_stride(plane, pixel_format, modifier, 2718 DRM_MODE_ROTATE_0); 2719 } 2720 2721 static 2722 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, 2723 u32 pixel_format, u64 modifier) 2724 { 2725 /* 2726 * Arbitrary limit for gen4+ chosen to match the 2727 * render engine max stride. 2728 * 2729 * The new CCS hash mode makes remapping impossible 2730 */ 2731 if (!is_ccs_modifier(modifier)) { 2732 if (INTEL_GEN(dev_priv) >= 7) 2733 return 256*1024; 2734 else if (INTEL_GEN(dev_priv) >= 4) 2735 return 128*1024; 2736 } 2737 2738 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); 2739 } 2740 2741 static u32 2742 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) 2743 { 2744 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2745 u32 tile_width; 2746 2747 if (is_surface_linear(fb, color_plane)) { 2748 u32 max_stride = intel_plane_fb_max_stride(dev_priv, 2749 fb->format->format, 2750 fb->modifier); 2751 2752 /* 2753 * To make remapping with linear generally feasible 2754 * we need the stride to be page aligned. 2755 */ 2756 if (fb->pitches[color_plane] > max_stride && 2757 !is_ccs_modifier(fb->modifier)) 2758 return intel_tile_size(dev_priv); 2759 else 2760 return 64; 2761 } 2762 2763 tile_width = intel_tile_width_bytes(fb, color_plane); 2764 if (is_ccs_modifier(fb->modifier)) { 2765 /* 2766 * Display WA #0531: skl,bxt,kbl,glk 2767 * 2768 * Render decompression and plane width > 3840 2769 * combined with horizontal panning requires the 2770 * plane stride to be a multiple of 4. We'll just 2771 * require the entire fb to accommodate that to avoid 2772 * potential runtime errors at plane configuration time. 2773 */ 2774 if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840) 2775 tile_width *= 4; 2776 /* 2777 * The main surface pitch must be padded to a multiple of four 2778 * tile widths. 2779 */ 2780 else if (INTEL_GEN(dev_priv) >= 12) 2781 tile_width *= 4; 2782 } 2783 return tile_width; 2784 } 2785 2786 bool intel_plane_can_remap(const struct intel_plane_state *plane_state) 2787 { 2788 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2789 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2790 const struct drm_framebuffer *fb = plane_state->hw.fb; 2791 int i; 2792 2793 /* We don't want to deal with remapping with cursors */ 2794 if (plane->id == PLANE_CURSOR) 2795 return false; 2796 2797 /* 2798 * The display engine limits already match/exceed the 2799 * render engine limits, so not much point in remapping. 2800 * Would also need to deal with the fence POT alignment 2801 * and gen2 2KiB GTT tile size. 2802 */ 2803 if (INTEL_GEN(dev_priv) < 4) 2804 return false; 2805 2806 /* 2807 * The new CCS hash mode isn't compatible with remapping as 2808 * the virtual address of the pages affects the compressed data. 
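	 * Remapping changes those addresses, so the data would no longer
	 * decompress correctly.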
2809 */ 2810 if (is_ccs_modifier(fb->modifier)) 2811 return false; 2812 2813 /* Linear needs a page aligned stride for remapping */ 2814 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { 2815 unsigned int alignment = intel_tile_size(dev_priv) - 1; 2816 2817 for (i = 0; i < fb->format->num_planes; i++) { 2818 if (fb->pitches[i] & alignment) 2819 return false; 2820 } 2821 } 2822 2823 return true; 2824 } 2825 2826 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) 2827 { 2828 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2829 const struct drm_framebuffer *fb = plane_state->hw.fb; 2830 unsigned int rotation = plane_state->hw.rotation; 2831 u32 stride, max_stride; 2832 2833 /* 2834 * No remapping for invisible planes since we don't have 2835 * an actual source viewport to remap. 2836 */ 2837 if (!plane_state->uapi.visible) 2838 return false; 2839 2840 if (!intel_plane_can_remap(plane_state)) 2841 return false; 2842 2843 /* 2844 * FIXME: aux plane limits on gen9+ are 2845 * unclear in Bspec, for now no checking. 2846 */ 2847 stride = intel_fb_pitch(fb, 0, rotation); 2848 max_stride = plane->max_stride(plane, fb->format->format, 2849 fb->modifier, rotation); 2850 2851 return stride > max_stride; 2852 } 2853 2854 static void 2855 intel_fb_plane_get_subsampling(int *hsub, int *vsub, 2856 const struct drm_framebuffer *fb, 2857 int color_plane) 2858 { 2859 int main_plane; 2860 2861 if (color_plane == 0) { 2862 *hsub = 1; 2863 *vsub = 1; 2864 2865 return; 2866 } 2867 2868 /* 2869 * TODO: Deduct the subsampling from the char block for all CCS 2870 * formats and planes. 2871 */ 2872 if (!is_gen12_ccs_plane(fb, color_plane)) { 2873 *hsub = fb->format->hsub; 2874 *vsub = fb->format->vsub; 2875 2876 return; 2877 } 2878 2879 main_plane = ccs_to_main_plane(fb, color_plane); 2880 *hsub = drm_format_info_block_width(fb->format, color_plane) / 2881 drm_format_info_block_width(fb->format, main_plane); 2882 2883 /* 2884 * The min stride check in the core framebuffer_check() function 2885 * assumes that format->hsub applies to every plane except for the 2886 * first plane. That's incorrect for the CCS AUX plane of the first 2887 * plane, but for the above check to pass we must define the block 2888 * width with that subsampling applied to it. Adjust the width here 2889 * accordingly, so we can calculate the actual subsampling factor. 2890 */ 2891 if (main_plane == 0) 2892 *hsub *= fb->format->hsub; 2893 2894 *vsub = 32; 2895 } 2896 static int 2897 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y) 2898 { 2899 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2900 int main_plane; 2901 int hsub, vsub; 2902 int tile_width, tile_height; 2903 int ccs_x, ccs_y; 2904 int main_x, main_y; 2905 2906 if (!is_ccs_plane(fb, ccs_plane)) 2907 return 0; 2908 2909 intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height); 2910 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); 2911 2912 tile_width *= hsub; 2913 tile_height *= vsub; 2914 2915 ccs_x = (x * hsub) % tile_width; 2916 ccs_y = (y * vsub) % tile_height; 2917 2918 main_plane = ccs_to_main_plane(fb, ccs_plane); 2919 main_x = intel_fb->normal[main_plane].x % tile_width; 2920 main_y = intel_fb->normal[main_plane].y % tile_height; 2921 2922 /* 2923 * CCS doesn't have its own x/y offset register, so the intra CCS tile 2924 * x/y offsets must match between CCS and the main surface. 
2925 */ 2926 if (main_x != ccs_x || main_y != ccs_y) { 2927 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", 2928 main_x, main_y, 2929 ccs_x, ccs_y, 2930 intel_fb->normal[main_plane].x, 2931 intel_fb->normal[main_plane].y, 2932 x, y); 2933 return -EINVAL; 2934 } 2935 2936 return 0; 2937 } 2938 2939 static void 2940 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane) 2941 { 2942 int main_plane = is_ccs_plane(fb, color_plane) ? 2943 ccs_to_main_plane(fb, color_plane) : 0; 2944 int main_hsub, main_vsub; 2945 int hsub, vsub; 2946 2947 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane); 2948 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane); 2949 *w = fb->width / main_hsub / hsub; 2950 *h = fb->height / main_vsub / vsub; 2951 } 2952 2953 /* 2954 * Setup the rotated view for an FB plane and return the size the GTT mapping 2955 * requires for this view. 2956 */ 2957 static u32 2958 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info, 2959 u32 gtt_offset_rotated, int x, int y, 2960 unsigned int width, unsigned int height, 2961 unsigned int tile_size, 2962 unsigned int tile_width, unsigned int tile_height, 2963 struct drm_framebuffer *fb) 2964 { 2965 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2966 struct intel_rotation_info *rot_info = &intel_fb->rot_info; 2967 unsigned int pitch_tiles; 2968 struct drm_rect r; 2969 2970 /* Y or Yf modifiers required for 90/270 rotation */ 2971 if (fb->modifier != I915_FORMAT_MOD_Y_TILED && 2972 fb->modifier != I915_FORMAT_MOD_Yf_TILED) 2973 return 0; 2974 2975 if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane))) 2976 return 0; 2977 2978 rot_info->plane[plane] = *plane_info; 2979 2980 intel_fb->rotated[plane].pitch = plane_info->height * tile_height; 2981 2982 /* rotate the x/y offsets to match the GTT view */ 2983 drm_rect_init(&r, x, y, width, height); 2984 drm_rect_rotate(&r, 2985 plane_info->width * tile_width, 2986 plane_info->height * tile_height, 2987 DRM_MODE_ROTATE_270); 2988 x = r.x1; 2989 y = r.y1; 2990 2991 /* rotate the tile dimensions to match the GTT view */ 2992 pitch_tiles = intel_fb->rotated[plane].pitch / tile_height; 2993 swap(tile_width, tile_height); 2994 2995 /* 2996 * We only keep the x/y offsets, so push all of the 2997 * gtt offset into the x/y offsets. 2998 */ 2999 intel_adjust_tile_offset(&x, &y, 3000 tile_width, tile_height, 3001 tile_size, pitch_tiles, 3002 gtt_offset_rotated * tile_size, 0); 3003 3004 /* 3005 * First pixel of the framebuffer from 3006 * the start of the rotated gtt mapping. 
3007 */ 3008 intel_fb->rotated[plane].x = x; 3009 intel_fb->rotated[plane].y = y; 3010 3011 return plane_info->width * plane_info->height; 3012 } 3013 3014 static int 3015 intel_fill_fb_info(struct drm_i915_private *dev_priv, 3016 struct drm_framebuffer *fb) 3017 { 3018 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 3019 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 3020 u32 gtt_offset_rotated = 0; 3021 unsigned int max_size = 0; 3022 int i, num_planes = fb->format->num_planes; 3023 unsigned int tile_size = intel_tile_size(dev_priv); 3024 3025 for (i = 0; i < num_planes; i++) { 3026 unsigned int width, height; 3027 unsigned int cpp, size; 3028 u32 offset; 3029 int x, y; 3030 int ret; 3031 3032 cpp = fb->format->cpp[i]; 3033 intel_fb_plane_dims(&width, &height, fb, i); 3034 3035 ret = intel_fb_offset_to_xy(&x, &y, fb, i); 3036 if (ret) { 3037 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 3038 i, fb->offsets[i]); 3039 return ret; 3040 } 3041 3042 ret = intel_fb_check_ccs_xy(fb, i, x, y); 3043 if (ret) 3044 return ret; 3045 3046 /* 3047 * The fence (if used) is aligned to the start of the object 3048 * so having the framebuffer wrap around across the edge of the 3049 * fenced region doesn't really work. We have no API to configure 3050 * the fence start offset within the object (nor could we probably 3051 * on gen2/3). So it's just easier if we just require that the 3052 * fb layout agrees with the fence layout. We already check that the 3053 * fb stride matches the fence stride elsewhere. 3054 */ 3055 if (i == 0 && i915_gem_object_is_tiled(obj) && 3056 (x + width) * cpp > fb->pitches[i]) { 3057 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 3058 i, fb->offsets[i]); 3059 return -EINVAL; 3060 } 3061 3062 /* 3063 * First pixel of the framebuffer from 3064 * the start of the normal gtt mapping. 3065 */ 3066 intel_fb->normal[i].x = x; 3067 intel_fb->normal[i].y = y; 3068 3069 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i, 3070 fb->pitches[i], 3071 DRM_MODE_ROTATE_0, 3072 tile_size); 3073 offset /= tile_size; 3074 3075 if (!is_surface_linear(fb, i)) { 3076 struct intel_remapped_plane_info plane_info; 3077 unsigned int tile_width, tile_height; 3078 3079 intel_tile_dims(fb, i, &tile_width, &tile_height); 3080 3081 plane_info.offset = offset; 3082 plane_info.stride = DIV_ROUND_UP(fb->pitches[i], 3083 tile_width * cpp); 3084 plane_info.width = DIV_ROUND_UP(x + width, tile_width); 3085 plane_info.height = DIV_ROUND_UP(y + height, 3086 tile_height); 3087 3088 /* how many tiles does this plane need */ 3089 size = plane_info.stride * plane_info.height; 3090 /* 3091 * If the plane isn't horizontally tile aligned, 3092 * we need one more tile. 
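			 * (the intra-tile x offset makes the last tile row
			 * spill into one extra tile)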
3093 */ 3094 if (x != 0) 3095 size++; 3096 3097 gtt_offset_rotated += 3098 setup_fb_rotation(i, &plane_info, 3099 gtt_offset_rotated, 3100 x, y, width, height, 3101 tile_size, 3102 tile_width, tile_height, 3103 fb); 3104 } else { 3105 size = DIV_ROUND_UP((y + height) * fb->pitches[i] + 3106 x * cpp, tile_size); 3107 } 3108 3109 /* how many tiles in total needed in the bo */ 3110 max_size = max(max_size, offset + size); 3111 } 3112 3113 if (mul_u32_u32(max_size, tile_size) > obj->base.size) { 3114 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n", 3115 mul_u32_u32(max_size, tile_size), obj->base.size); 3116 return -EINVAL; 3117 } 3118 3119 return 0; 3120 } 3121 3122 static void 3123 intel_plane_remap_gtt(struct intel_plane_state *plane_state) 3124 { 3125 struct drm_i915_private *dev_priv = 3126 to_i915(plane_state->uapi.plane->dev); 3127 struct drm_framebuffer *fb = plane_state->hw.fb; 3128 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 3129 struct intel_rotation_info *info = &plane_state->view.rotated; 3130 unsigned int rotation = plane_state->hw.rotation; 3131 int i, num_planes = fb->format->num_planes; 3132 unsigned int tile_size = intel_tile_size(dev_priv); 3133 unsigned int src_x, src_y; 3134 unsigned int src_w, src_h; 3135 u32 gtt_offset = 0; 3136 3137 memset(&plane_state->view, 0, sizeof(plane_state->view)); 3138 plane_state->view.type = drm_rotation_90_or_270(rotation) ? 3139 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED; 3140 3141 src_x = plane_state->uapi.src.x1 >> 16; 3142 src_y = plane_state->uapi.src.y1 >> 16; 3143 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 3144 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 3145 3146 WARN_ON(is_ccs_modifier(fb->modifier)); 3147 3148 /* Make src coordinates relative to the viewport */ 3149 drm_rect_translate(&plane_state->uapi.src, 3150 -(src_x << 16), -(src_y << 16)); 3151 3152 /* Rotate src coordinates to match rotated GTT view */ 3153 if (drm_rotation_90_or_270(rotation)) 3154 drm_rect_rotate(&plane_state->uapi.src, 3155 src_w << 16, src_h << 16, 3156 DRM_MODE_ROTATE_270); 3157 3158 for (i = 0; i < num_planes; i++) { 3159 unsigned int hsub = i ? fb->format->hsub : 1; 3160 unsigned int vsub = i ? fb->format->vsub : 1; 3161 unsigned int cpp = fb->format->cpp[i]; 3162 unsigned int tile_width, tile_height; 3163 unsigned int width, height; 3164 unsigned int pitch_tiles; 3165 unsigned int x, y; 3166 u32 offset; 3167 3168 intel_tile_dims(fb, i, &tile_width, &tile_height); 3169 3170 x = src_x / hsub; 3171 y = src_y / vsub; 3172 width = src_w / hsub; 3173 height = src_h / vsub; 3174 3175 /* 3176 * First pixel of the src viewport from the 3177 * start of the normal gtt mapping. 
3178 */ 3179 x += intel_fb->normal[i].x; 3180 y += intel_fb->normal[i].y; 3181 3182 offset = intel_compute_aligned_offset(dev_priv, &x, &y, 3183 fb, i, fb->pitches[i], 3184 DRM_MODE_ROTATE_0, tile_size); 3185 offset /= tile_size; 3186 3187 WARN_ON(i >= ARRAY_SIZE(info->plane)); 3188 info->plane[i].offset = offset; 3189 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], 3190 tile_width * cpp); 3191 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 3192 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 3193 3194 if (drm_rotation_90_or_270(rotation)) { 3195 struct drm_rect r; 3196 3197 /* rotate the x/y offsets to match the GTT view */ 3198 drm_rect_init(&r, x, y, width, height); 3199 drm_rect_rotate(&r, 3200 info->plane[i].width * tile_width, 3201 info->plane[i].height * tile_height, 3202 DRM_MODE_ROTATE_270); 3203 x = r.x1; 3204 y = r.y1; 3205 3206 pitch_tiles = info->plane[i].height; 3207 plane_state->color_plane[i].stride = pitch_tiles * tile_height; 3208 3209 /* rotate the tile dimensions to match the GTT view */ 3210 swap(tile_width, tile_height); 3211 } else { 3212 pitch_tiles = info->plane[i].width; 3213 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp; 3214 } 3215 3216 /* 3217 * We only keep the x/y offsets, so push all of the 3218 * gtt offset into the x/y offsets. 3219 */ 3220 intel_adjust_tile_offset(&x, &y, 3221 tile_width, tile_height, 3222 tile_size, pitch_tiles, 3223 gtt_offset * tile_size, 0); 3224 3225 gtt_offset += info->plane[i].width * info->plane[i].height; 3226 3227 plane_state->color_plane[i].offset = 0; 3228 plane_state->color_plane[i].x = x; 3229 plane_state->color_plane[i].y = y; 3230 } 3231 } 3232 3233 static int 3234 intel_plane_compute_gtt(struct intel_plane_state *plane_state) 3235 { 3236 const struct intel_framebuffer *fb = 3237 to_intel_framebuffer(plane_state->hw.fb); 3238 unsigned int rotation = plane_state->hw.rotation; 3239 int i, num_planes; 3240 3241 if (!fb) 3242 return 0; 3243 3244 num_planes = fb->base.format->num_planes; 3245 3246 if (intel_plane_needs_remap(plane_state)) { 3247 intel_plane_remap_gtt(plane_state); 3248 3249 /* 3250 * Sometimes even remapping can't overcome 3251 * the stride limitations :( Can happen with 3252 * big plane sizes and suitably misaligned 3253 * offsets. 
3254 */ 3255 return intel_plane_check_stride(plane_state); 3256 } 3257 3258 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation); 3259 3260 for (i = 0; i < num_planes; i++) { 3261 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation); 3262 plane_state->color_plane[i].offset = 0; 3263 3264 if (drm_rotation_90_or_270(rotation)) { 3265 plane_state->color_plane[i].x = fb->rotated[i].x; 3266 plane_state->color_plane[i].y = fb->rotated[i].y; 3267 } else { 3268 plane_state->color_plane[i].x = fb->normal[i].x; 3269 plane_state->color_plane[i].y = fb->normal[i].y; 3270 } 3271 } 3272 3273 /* Rotate src coordinates to match rotated GTT view */ 3274 if (drm_rotation_90_or_270(rotation)) 3275 drm_rect_rotate(&plane_state->uapi.src, 3276 fb->base.width << 16, fb->base.height << 16, 3277 DRM_MODE_ROTATE_270); 3278 3279 return intel_plane_check_stride(plane_state); 3280 } 3281 3282 static int i9xx_format_to_fourcc(int format) 3283 { 3284 switch (format) { 3285 case DISPPLANE_8BPP: 3286 return DRM_FORMAT_C8; 3287 case DISPPLANE_BGRA555: 3288 return DRM_FORMAT_ARGB1555; 3289 case DISPPLANE_BGRX555: 3290 return DRM_FORMAT_XRGB1555; 3291 case DISPPLANE_BGRX565: 3292 return DRM_FORMAT_RGB565; 3293 default: 3294 case DISPPLANE_BGRX888: 3295 return DRM_FORMAT_XRGB8888; 3296 case DISPPLANE_RGBX888: 3297 return DRM_FORMAT_XBGR8888; 3298 case DISPPLANE_BGRA888: 3299 return DRM_FORMAT_ARGB8888; 3300 case DISPPLANE_RGBA888: 3301 return DRM_FORMAT_ABGR8888; 3302 case DISPPLANE_BGRX101010: 3303 return DRM_FORMAT_XRGB2101010; 3304 case DISPPLANE_RGBX101010: 3305 return DRM_FORMAT_XBGR2101010; 3306 case DISPPLANE_BGRA101010: 3307 return DRM_FORMAT_ARGB2101010; 3308 case DISPPLANE_RGBA101010: 3309 return DRM_FORMAT_ABGR2101010; 3310 case DISPPLANE_RGBX161616: 3311 return DRM_FORMAT_XBGR16161616F; 3312 } 3313 } 3314 3315 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 3316 { 3317 switch (format) { 3318 case PLANE_CTL_FORMAT_RGB_565: 3319 return DRM_FORMAT_RGB565; 3320 case PLANE_CTL_FORMAT_NV12: 3321 return DRM_FORMAT_NV12; 3322 case PLANE_CTL_FORMAT_P010: 3323 return DRM_FORMAT_P010; 3324 case PLANE_CTL_FORMAT_P012: 3325 return DRM_FORMAT_P012; 3326 case PLANE_CTL_FORMAT_P016: 3327 return DRM_FORMAT_P016; 3328 case PLANE_CTL_FORMAT_Y210: 3329 return DRM_FORMAT_Y210; 3330 case PLANE_CTL_FORMAT_Y212: 3331 return DRM_FORMAT_Y212; 3332 case PLANE_CTL_FORMAT_Y216: 3333 return DRM_FORMAT_Y216; 3334 case PLANE_CTL_FORMAT_Y410: 3335 return DRM_FORMAT_XVYU2101010; 3336 case PLANE_CTL_FORMAT_Y412: 3337 return DRM_FORMAT_XVYU12_16161616; 3338 case PLANE_CTL_FORMAT_Y416: 3339 return DRM_FORMAT_XVYU16161616; 3340 default: 3341 case PLANE_CTL_FORMAT_XRGB_8888: 3342 if (rgb_order) { 3343 if (alpha) 3344 return DRM_FORMAT_ABGR8888; 3345 else 3346 return DRM_FORMAT_XBGR8888; 3347 } else { 3348 if (alpha) 3349 return DRM_FORMAT_ARGB8888; 3350 else 3351 return DRM_FORMAT_XRGB8888; 3352 } 3353 case PLANE_CTL_FORMAT_XRGB_2101010: 3354 if (rgb_order) { 3355 if (alpha) 3356 return DRM_FORMAT_ABGR2101010; 3357 else 3358 return DRM_FORMAT_XBGR2101010; 3359 } else { 3360 if (alpha) 3361 return DRM_FORMAT_ARGB2101010; 3362 else 3363 return DRM_FORMAT_XRGB2101010; 3364 } 3365 case PLANE_CTL_FORMAT_XRGB_16161616F: 3366 if (rgb_order) { 3367 if (alpha) 3368 return DRM_FORMAT_ABGR16161616F; 3369 else 3370 return DRM_FORMAT_XBGR16161616F; 3371 } else { 3372 if (alpha) 3373 return DRM_FORMAT_ARGB16161616F; 3374 else 3375 return DRM_FORMAT_XRGB16161616F; 3376 } 3377 } 3378 } 3379 3380 static bool 3381 
intel_alloc_initial_plane_obj(struct intel_crtc *crtc, 3382 struct intel_initial_plane_config *plane_config) 3383 { 3384 struct drm_device *dev = crtc->base.dev; 3385 struct drm_i915_private *dev_priv = to_i915(dev); 3386 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 3387 struct drm_framebuffer *fb = &plane_config->fb->base; 3388 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); 3389 u32 size_aligned = round_up(plane_config->base + plane_config->size, 3390 PAGE_SIZE); 3391 struct drm_i915_gem_object *obj; 3392 bool ret = false; 3393 3394 size_aligned -= base_aligned; 3395 3396 if (plane_config->size == 0) 3397 return false; 3398 3399 /* If the FB is too big, just don't use it since fbdev is not very 3400 * important and we should probably use that space with FBC or other 3401 * features. */ 3402 if (size_aligned * 2 > dev_priv->stolen_usable_size) 3403 return false; 3404 3405 switch (fb->modifier) { 3406 case DRM_FORMAT_MOD_LINEAR: 3407 case I915_FORMAT_MOD_X_TILED: 3408 case I915_FORMAT_MOD_Y_TILED: 3409 break; 3410 default: 3411 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n", 3412 fb->modifier); 3413 return false; 3414 } 3415 3416 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, 3417 base_aligned, 3418 base_aligned, 3419 size_aligned); 3420 if (IS_ERR(obj)) 3421 return false; 3422 3423 switch (plane_config->tiling) { 3424 case I915_TILING_NONE: 3425 break; 3426 case I915_TILING_X: 3427 case I915_TILING_Y: 3428 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling; 3429 break; 3430 default: 3431 MISSING_CASE(plane_config->tiling); 3432 goto out; 3433 } 3434 3435 mode_cmd.pixel_format = fb->format->format; 3436 mode_cmd.width = fb->width; 3437 mode_cmd.height = fb->height; 3438 mode_cmd.pitches[0] = fb->pitches[0]; 3439 mode_cmd.modifier[0] = fb->modifier; 3440 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 3441 3442 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) { 3443 DRM_DEBUG_KMS("intel fb init failed\n"); 3444 goto out; 3445 } 3446 3447 3448 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); 3449 ret = true; 3450 out: 3451 i915_gem_object_put(obj); 3452 return ret; 3453 } 3454 3455 static void 3456 intel_set_plane_visible(struct intel_crtc_state *crtc_state, 3457 struct intel_plane_state *plane_state, 3458 bool visible) 3459 { 3460 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 3461 3462 plane_state->uapi.visible = visible; 3463 3464 if (visible) 3465 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base); 3466 else 3467 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base); 3468 } 3469 3470 static void fixup_active_planes(struct intel_crtc_state *crtc_state) 3471 { 3472 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 3473 struct drm_plane *plane; 3474 3475 /* 3476 * Active_planes aliases if multiple "primary" or cursor planes 3477 * have been used on the same (or wrong) pipe. plane_mask uses 3478 * unique ids, hence we can use that to reconstruct active_planes. 
3479 */ 3480 crtc_state->active_planes = 0; 3481 3482 drm_for_each_plane_mask(plane, &dev_priv->drm, 3483 crtc_state->uapi.plane_mask) 3484 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); 3485 } 3486 3487 static void intel_plane_disable_noatomic(struct intel_crtc *crtc, 3488 struct intel_plane *plane) 3489 { 3490 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3491 struct intel_crtc_state *crtc_state = 3492 to_intel_crtc_state(crtc->base.state); 3493 struct intel_plane_state *plane_state = 3494 to_intel_plane_state(plane->base.state); 3495 3496 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", 3497 plane->base.base.id, plane->base.name, 3498 crtc->base.base.id, crtc->base.name); 3499 3500 intel_set_plane_visible(crtc_state, plane_state, false); 3501 fixup_active_planes(crtc_state); 3502 crtc_state->data_rate[plane->id] = 0; 3503 crtc_state->min_cdclk[plane->id] = 0; 3504 3505 if (plane->id == PLANE_PRIMARY) 3506 hsw_disable_ips(crtc_state); 3507 3508 /* 3509 * Vblank time updates from the shadow to live plane control register 3510 * are blocked if the memory self-refresh mode is active at that 3511 * moment. So to make sure the plane gets truly disabled, disable 3512 * first the self-refresh mode. The self-refresh enable bit in turn 3513 * will be checked/applied by the HW only at the next frame start 3514 * event which is after the vblank start event, so we need to have a 3515 * wait-for-vblank between disabling the plane and the pipe. 3516 */ 3517 if (HAS_GMCH(dev_priv) && 3518 intel_set_memory_cxsr(dev_priv, false)) 3519 intel_wait_for_vblank(dev_priv, crtc->pipe); 3520 3521 /* 3522 * Gen2 reports pipe underruns whenever all planes are disabled. 3523 * So disable underrun reporting before all the planes get disabled. 3524 */ 3525 if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes) 3526 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 3527 3528 intel_disable_plane(plane, crtc_state); 3529 } 3530 3531 static struct intel_frontbuffer * 3532 to_intel_frontbuffer(struct drm_framebuffer *fb) 3533 { 3534 return fb ? 
to_intel_framebuffer(fb)->frontbuffer : NULL; 3535 } 3536 3537 static void 3538 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 3539 struct intel_initial_plane_config *plane_config) 3540 { 3541 struct drm_device *dev = intel_crtc->base.dev; 3542 struct drm_i915_private *dev_priv = to_i915(dev); 3543 struct drm_crtc *c; 3544 struct drm_plane *primary = intel_crtc->base.primary; 3545 struct drm_plane_state *plane_state = primary->state; 3546 struct intel_plane *intel_plane = to_intel_plane(primary); 3547 struct intel_plane_state *intel_state = 3548 to_intel_plane_state(plane_state); 3549 struct drm_framebuffer *fb; 3550 3551 if (!plane_config->fb) 3552 return; 3553 3554 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { 3555 fb = &plane_config->fb->base; 3556 goto valid_fb; 3557 } 3558 3559 kfree(plane_config->fb); 3560 3561 /* 3562 * Failed to alloc the obj, check to see if we should share 3563 * an fb with another CRTC instead 3564 */ 3565 for_each_crtc(dev, c) { 3566 struct intel_plane_state *state; 3567 3568 if (c == &intel_crtc->base) 3569 continue; 3570 3571 if (!to_intel_crtc(c)->active) 3572 continue; 3573 3574 state = to_intel_plane_state(c->primary->state); 3575 if (!state->vma) 3576 continue; 3577 3578 if (intel_plane_ggtt_offset(state) == plane_config->base) { 3579 fb = state->hw.fb; 3580 drm_framebuffer_get(fb); 3581 goto valid_fb; 3582 } 3583 } 3584 3585 /* 3586 * We've failed to reconstruct the BIOS FB. Current display state 3587 * indicates that the primary plane is visible, but has a NULL FB, 3588 * which will lead to problems later if we don't fix it up. The 3589 * simplest solution is to just disable the primary plane now and 3590 * pretend the BIOS never had it enabled. 3591 */ 3592 intel_plane_disable_noatomic(intel_crtc, intel_plane); 3593 3594 return; 3595 3596 valid_fb: 3597 intel_state->hw.rotation = plane_config->rotation; 3598 intel_fill_fb_ggtt_view(&intel_state->view, fb, 3599 intel_state->hw.rotation); 3600 intel_state->color_plane[0].stride = 3601 intel_fb_pitch(fb, 0, intel_state->hw.rotation); 3602 3603 intel_state->vma = 3604 intel_pin_and_fence_fb_obj(fb, 3605 &intel_state->view, 3606 intel_plane_uses_fence(intel_state), 3607 &intel_state->flags); 3608 if (IS_ERR(intel_state->vma)) { 3609 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", 3610 intel_crtc->pipe, PTR_ERR(intel_state->vma)); 3611 3612 intel_state->vma = NULL; 3613 drm_framebuffer_put(fb); 3614 return; 3615 } 3616 3617 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); 3618 3619 plane_state->src_x = 0; 3620 plane_state->src_y = 0; 3621 plane_state->src_w = fb->width << 16; 3622 plane_state->src_h = fb->height << 16; 3623 3624 plane_state->crtc_x = 0; 3625 plane_state->crtc_y = 0; 3626 plane_state->crtc_w = fb->width; 3627 plane_state->crtc_h = fb->height; 3628 3629 intel_state->uapi.src = drm_plane_state_src(plane_state); 3630 intel_state->uapi.dst = drm_plane_state_dest(plane_state); 3631 3632 if (plane_config->tiling) 3633 dev_priv->preserve_bios_swizzle = true; 3634 3635 plane_state->fb = fb; 3636 plane_state->crtc = &intel_crtc->base; 3637 intel_plane_copy_uapi_to_hw_state(intel_state, intel_state); 3638 3639 atomic_or(to_intel_plane(primary)->frontbuffer_bit, 3640 &to_intel_frontbuffer(fb)->bits); 3641 } 3642 3643 static int skl_max_plane_width(const struct drm_framebuffer *fb, 3644 int color_plane, 3645 unsigned int rotation) 3646 { 3647 int cpp = fb->format->cpp[color_plane]; 3648 3649 switch (fb->modifier) { 3650 case DRM_FORMAT_MOD_LINEAR: 3651 
	case I915_FORMAT_MOD_X_TILED:
		/*
		 * The validated limit is 4k, but 5k should work apart
		 * from the following features:
		 * - Ytile (already limited to 4k)
		 * - FP16 (already limited to 4k)
		 * - render compression (already limited to 4k)
		 * - KVMR sprite and cursor (don't care)
		 * - horizontal panning (TODO verify this)
		 * - pipe and plane scaling (TODO verify this)
		 */
		if (cpp == 8)
			return 4096;
		else
			return 5120;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* FIXME AUX plane? */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (cpp == 8)
			return 2048;
		else
			return 4096;
	default:
		MISSING_CASE(fb->modifier);
		return 2048;
	}
}

static int glk_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		if (cpp == 8)
			return 4096;
		else
			return 5120;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* FIXME AUX plane? */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (cpp == 8)
			return 2048;
		else
			return 5120;
	default:
		MISSING_CASE(fb->modifier);
		return 2048;
	}
}

static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}

static int skl_max_plane_height(void)
{
	return 4096;
}

static int icl_max_plane_height(void)
{
	return 4320;
}

static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
							       alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}

static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src)
>> 16; 3780 int h = drm_rect_height(&plane_state->uapi.src) >> 16; 3781 int max_width; 3782 int max_height; 3783 u32 alignment; 3784 u32 offset; 3785 int aux_plane = intel_main_to_aux_plane(fb, 0); 3786 u32 aux_offset = plane_state->color_plane[aux_plane].offset; 3787 3788 if (INTEL_GEN(dev_priv) >= 11) 3789 max_width = icl_max_plane_width(fb, 0, rotation); 3790 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 3791 max_width = glk_max_plane_width(fb, 0, rotation); 3792 else 3793 max_width = skl_max_plane_width(fb, 0, rotation); 3794 3795 if (INTEL_GEN(dev_priv) >= 11) 3796 max_height = icl_max_plane_height(); 3797 else 3798 max_height = skl_max_plane_height(); 3799 3800 if (w > max_width || h > max_height) { 3801 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", 3802 w, h, max_width, max_height); 3803 return -EINVAL; 3804 } 3805 3806 intel_add_fb_offsets(&x, &y, plane_state, 0); 3807 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0); 3808 alignment = intel_surf_alignment(fb, 0); 3809 if (WARN_ON(alignment && !is_power_of_2(alignment))) 3810 return -EINVAL; 3811 3812 /* 3813 * AUX surface offset is specified as the distance from the 3814 * main surface offset, and it must be non-negative. Make 3815 * sure that is what we will get. 3816 */ 3817 if (offset > aux_offset) 3818 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3819 offset, aux_offset & ~(alignment - 1)); 3820 3821 /* 3822 * When using an X-tiled surface, the plane blows up 3823 * if the x offset + width exceed the stride. 3824 * 3825 * TODO: linear and Y-tiled seem fine, Yf untested, 3826 */ 3827 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 3828 int cpp = fb->format->cpp[0]; 3829 3830 while ((x + w) * cpp > plane_state->color_plane[0].stride) { 3831 if (offset == 0) { 3832 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n"); 3833 return -EINVAL; 3834 } 3835 3836 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3837 offset, offset - alignment); 3838 } 3839 } 3840 3841 /* 3842 * CCS AUX surface doesn't have its own x/y offsets, we must make sure 3843 * they match with the main surface x/y offsets. 3844 */ 3845 if (is_ccs_modifier(fb->modifier)) { 3846 while (!skl_check_main_ccs_coordinates(plane_state, x, y, 3847 offset, aux_plane)) { 3848 if (offset == 0) 3849 break; 3850 3851 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, 3852 offset, offset - alignment); 3853 } 3854 3855 if (x != plane_state->color_plane[aux_plane].x || 3856 y != plane_state->color_plane[aux_plane].y) { 3857 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); 3858 return -EINVAL; 3859 } 3860 } 3861 3862 plane_state->color_plane[0].offset = offset; 3863 plane_state->color_plane[0].x = x; 3864 plane_state->color_plane[0].y = y; 3865 3866 /* 3867 * Put the final coordinates back so that the src 3868 * coordinate checks will see the right values. 
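	 *
	 * (drm_rect_translate_to() here simply moves uapi.src so that its
	 * top-left corner becomes the 16.16 fixed-point version of the
	 * x/y we just computed relative to color_plane[0].offset.)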
3869 */ 3870 drm_rect_translate_to(&plane_state->uapi.src, 3871 x << 16, y << 16); 3872 3873 return 0; 3874 } 3875 3876 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 3877 { 3878 const struct drm_framebuffer *fb = plane_state->hw.fb; 3879 unsigned int rotation = plane_state->hw.rotation; 3880 int uv_plane = 1; 3881 int max_width = skl_max_plane_width(fb, uv_plane, rotation); 3882 int max_height = 4096; 3883 int x = plane_state->uapi.src.x1 >> 17; 3884 int y = plane_state->uapi.src.y1 >> 17; 3885 int w = drm_rect_width(&plane_state->uapi.src) >> 17; 3886 int h = drm_rect_height(&plane_state->uapi.src) >> 17; 3887 u32 offset; 3888 3889 intel_add_fb_offsets(&x, &y, plane_state, uv_plane); 3890 offset = intel_plane_compute_aligned_offset(&x, &y, 3891 plane_state, uv_plane); 3892 3893 /* FIXME not quite sure how/if these apply to the chroma plane */ 3894 if (w > max_width || h > max_height) { 3895 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", 3896 w, h, max_width, max_height); 3897 return -EINVAL; 3898 } 3899 3900 if (is_ccs_modifier(fb->modifier)) { 3901 int ccs_plane = main_to_ccs_plane(fb, uv_plane); 3902 int aux_offset = plane_state->color_plane[ccs_plane].offset; 3903 int alignment = intel_surf_alignment(fb, uv_plane); 3904 3905 if (offset > aux_offset) 3906 offset = intel_plane_adjust_aligned_offset(&x, &y, 3907 plane_state, 3908 uv_plane, 3909 offset, 3910 aux_offset & ~(alignment - 1)); 3911 3912 while (!skl_check_main_ccs_coordinates(plane_state, x, y, 3913 offset, ccs_plane)) { 3914 if (offset == 0) 3915 break; 3916 3917 offset = intel_plane_adjust_aligned_offset(&x, &y, 3918 plane_state, 3919 uv_plane, 3920 offset, offset - alignment); 3921 } 3922 3923 if (x != plane_state->color_plane[ccs_plane].x || 3924 y != plane_state->color_plane[ccs_plane].y) { 3925 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); 3926 return -EINVAL; 3927 } 3928 } 3929 3930 plane_state->color_plane[uv_plane].offset = offset; 3931 plane_state->color_plane[uv_plane].x = x; 3932 plane_state->color_plane[uv_plane].y = y; 3933 3934 return 0; 3935 } 3936 3937 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) 3938 { 3939 const struct drm_framebuffer *fb = plane_state->hw.fb; 3940 int src_x = plane_state->uapi.src.x1 >> 16; 3941 int src_y = plane_state->uapi.src.y1 >> 16; 3942 u32 offset; 3943 int ccs_plane; 3944 3945 for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) { 3946 int main_hsub, main_vsub; 3947 int hsub, vsub; 3948 int x, y; 3949 3950 if (!is_ccs_plane(fb, ccs_plane)) 3951 continue; 3952 3953 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, 3954 ccs_to_main_plane(fb, ccs_plane)); 3955 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); 3956 3957 hsub *= main_hsub; 3958 vsub *= main_vsub; 3959 x = src_x / hsub; 3960 y = src_y / vsub; 3961 3962 intel_add_fb_offsets(&x, &y, plane_state, ccs_plane); 3963 3964 offset = intel_plane_compute_aligned_offset(&x, &y, 3965 plane_state, 3966 ccs_plane); 3967 3968 plane_state->color_plane[ccs_plane].offset = offset; 3969 plane_state->color_plane[ccs_plane].x = (x * hsub + 3970 src_x % hsub) / 3971 main_hsub; 3972 plane_state->color_plane[ccs_plane].y = (y * vsub + 3973 src_y % vsub) / 3974 main_vsub; 3975 } 3976 3977 return 0; 3978 } 3979 3980 int skl_check_plane_surface(struct intel_plane_state *plane_state) 3981 { 3982 const struct drm_framebuffer *fb = plane_state->hw.fb; 3983 int ret; 3984 bool needs_aux = false; 3985 3986 ret = 
intel_plane_compute_gtt(plane_state); 3987 if (ret) 3988 return ret; 3989 3990 if (!plane_state->uapi.visible) 3991 return 0; 3992 3993 /* 3994 * Handle the AUX surface first since the main surface setup depends on 3995 * it. 3996 */ 3997 if (is_ccs_modifier(fb->modifier)) { 3998 needs_aux = true; 3999 ret = skl_check_ccs_aux_surface(plane_state); 4000 if (ret) 4001 return ret; 4002 } 4003 4004 if (intel_format_info_is_yuv_semiplanar(fb->format, 4005 fb->modifier)) { 4006 needs_aux = true; 4007 ret = skl_check_nv12_aux_surface(plane_state); 4008 if (ret) 4009 return ret; 4010 } 4011 4012 if (!needs_aux) { 4013 int i; 4014 4015 for (i = 1; i < fb->format->num_planes; i++) { 4016 plane_state->color_plane[i].offset = ~0xfff; 4017 plane_state->color_plane[i].x = 0; 4018 plane_state->color_plane[i].y = 0; 4019 } 4020 } 4021 4022 ret = skl_check_main_surface(plane_state); 4023 if (ret) 4024 return ret; 4025 4026 return 0; 4027 } 4028 4029 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state, 4030 const struct intel_plane_state *plane_state, 4031 unsigned int *num, unsigned int *den) 4032 { 4033 const struct drm_framebuffer *fb = plane_state->hw.fb; 4034 unsigned int cpp = fb->format->cpp[0]; 4035 4036 /* 4037 * g4x bspec says 64bpp pixel rate can't exceed 80% 4038 * of cdclk when the sprite plane is enabled on the 4039 * same pipe. ilk/snb bspec says 64bpp pixel rate is 4040 * never allowed to exceed 80% of cdclk. Let's just go 4041 * with the ilk/snb limit always. 4042 */ 4043 if (cpp == 8) { 4044 *num = 10; 4045 *den = 8; 4046 } else { 4047 *num = 1; 4048 *den = 1; 4049 } 4050 } 4051 4052 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, 4053 const struct intel_plane_state *plane_state) 4054 { 4055 unsigned int pixel_rate; 4056 unsigned int num, den; 4057 4058 /* 4059 * Note that crtc_state->pixel_rate accounts for both 4060 * horizontal and vertical panel fitter downscaling factors. 4061 * Pre-HSW bspec tells us to only consider the horizontal 4062 * downscaling factor here. We ignore that and just consider 4063 * both for simplicity. 
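	 *
	 * As an illustrative example (numbers made up, not from bspec):
	 * a 64bpp (cpp == 8) plane with crtc_state->pixel_rate of
	 * 100000 kHz gets num/den = 10/8 from i9xx_plane_ratio(), i.e.
	 * a minimum cdclk of DIV_ROUND_UP(100000 * 10, 8) = 125000 kHz,
	 * which is just the "64bpp pixel rate <= 80% of cdclk" rule
	 * inverted.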
4064 */ 4065 pixel_rate = crtc_state->pixel_rate; 4066 4067 i9xx_plane_ratio(crtc_state, plane_state, &num, &den); 4068 4069 /* two pixels per clock with double wide pipe */ 4070 if (crtc_state->double_wide) 4071 den *= 2; 4072 4073 return DIV_ROUND_UP(pixel_rate * num, den); 4074 } 4075 4076 unsigned int 4077 i9xx_plane_max_stride(struct intel_plane *plane, 4078 u32 pixel_format, u64 modifier, 4079 unsigned int rotation) 4080 { 4081 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4082 4083 if (!HAS_GMCH(dev_priv)) { 4084 return 32*1024; 4085 } else if (INTEL_GEN(dev_priv) >= 4) { 4086 if (modifier == I915_FORMAT_MOD_X_TILED) 4087 return 16*1024; 4088 else 4089 return 32*1024; 4090 } else if (INTEL_GEN(dev_priv) >= 3) { 4091 if (modifier == I915_FORMAT_MOD_X_TILED) 4092 return 8*1024; 4093 else 4094 return 16*1024; 4095 } else { 4096 if (plane->i9xx_plane == PLANE_C) 4097 return 4*1024; 4098 else 4099 return 8*1024; 4100 } 4101 } 4102 4103 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 4104 { 4105 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4106 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4107 u32 dspcntr = 0; 4108 4109 if (crtc_state->gamma_enable) 4110 dspcntr |= DISPPLANE_GAMMA_ENABLE; 4111 4112 if (crtc_state->csc_enable) 4113 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 4114 4115 if (INTEL_GEN(dev_priv) < 5) 4116 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 4117 4118 return dspcntr; 4119 } 4120 4121 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 4122 const struct intel_plane_state *plane_state) 4123 { 4124 struct drm_i915_private *dev_priv = 4125 to_i915(plane_state->uapi.plane->dev); 4126 const struct drm_framebuffer *fb = plane_state->hw.fb; 4127 unsigned int rotation = plane_state->hw.rotation; 4128 u32 dspcntr; 4129 4130 dspcntr = DISPLAY_PLANE_ENABLE; 4131 4132 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) || 4133 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 4134 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 4135 4136 switch (fb->format->format) { 4137 case DRM_FORMAT_C8: 4138 dspcntr |= DISPPLANE_8BPP; 4139 break; 4140 case DRM_FORMAT_XRGB1555: 4141 dspcntr |= DISPPLANE_BGRX555; 4142 break; 4143 case DRM_FORMAT_ARGB1555: 4144 dspcntr |= DISPPLANE_BGRA555; 4145 break; 4146 case DRM_FORMAT_RGB565: 4147 dspcntr |= DISPPLANE_BGRX565; 4148 break; 4149 case DRM_FORMAT_XRGB8888: 4150 dspcntr |= DISPPLANE_BGRX888; 4151 break; 4152 case DRM_FORMAT_XBGR8888: 4153 dspcntr |= DISPPLANE_RGBX888; 4154 break; 4155 case DRM_FORMAT_ARGB8888: 4156 dspcntr |= DISPPLANE_BGRA888; 4157 break; 4158 case DRM_FORMAT_ABGR8888: 4159 dspcntr |= DISPPLANE_RGBA888; 4160 break; 4161 case DRM_FORMAT_XRGB2101010: 4162 dspcntr |= DISPPLANE_BGRX101010; 4163 break; 4164 case DRM_FORMAT_XBGR2101010: 4165 dspcntr |= DISPPLANE_RGBX101010; 4166 break; 4167 case DRM_FORMAT_ARGB2101010: 4168 dspcntr |= DISPPLANE_BGRA101010; 4169 break; 4170 case DRM_FORMAT_ABGR2101010: 4171 dspcntr |= DISPPLANE_RGBA101010; 4172 break; 4173 case DRM_FORMAT_XBGR16161616F: 4174 dspcntr |= DISPPLANE_RGBX161616; 4175 break; 4176 default: 4177 MISSING_CASE(fb->format->format); 4178 return 0; 4179 } 4180 4181 if (INTEL_GEN(dev_priv) >= 4 && 4182 fb->modifier == I915_FORMAT_MOD_X_TILED) 4183 dspcntr |= DISPPLANE_TILED; 4184 4185 if (rotation & DRM_MODE_ROTATE_180) 4186 dspcntr |= DISPPLANE_ROTATE_180; 4187 4188 if (rotation & DRM_MODE_REFLECT_X) 4189 dspcntr |= DISPPLANE_MIRROR; 4190 4191 return dspcntr; 4192 } 4193 4194 int i9xx_check_plane_surface(struct 
intel_plane_state *plane_state) 4195 { 4196 struct drm_i915_private *dev_priv = 4197 to_i915(plane_state->uapi.plane->dev); 4198 const struct drm_framebuffer *fb = plane_state->hw.fb; 4199 int src_x, src_y, src_w; 4200 u32 offset; 4201 int ret; 4202 4203 ret = intel_plane_compute_gtt(plane_state); 4204 if (ret) 4205 return ret; 4206 4207 if (!plane_state->uapi.visible) 4208 return 0; 4209 4210 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 4211 src_x = plane_state->uapi.src.x1 >> 16; 4212 src_y = plane_state->uapi.src.y1 >> 16; 4213 4214 /* Undocumented hardware limit on i965/g4x/vlv/chv */ 4215 if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) 4216 return -EINVAL; 4217 4218 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 4219 4220 if (INTEL_GEN(dev_priv) >= 4) 4221 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 4222 plane_state, 0); 4223 else 4224 offset = 0; 4225 4226 /* 4227 * Put the final coordinates back so that the src 4228 * coordinate checks will see the right values. 4229 */ 4230 drm_rect_translate_to(&plane_state->uapi.src, 4231 src_x << 16, src_y << 16); 4232 4233 /* HSW/BDW do this automagically in hardware */ 4234 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 4235 unsigned int rotation = plane_state->hw.rotation; 4236 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 4237 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 4238 4239 if (rotation & DRM_MODE_ROTATE_180) { 4240 src_x += src_w - 1; 4241 src_y += src_h - 1; 4242 } else if (rotation & DRM_MODE_REFLECT_X) { 4243 src_x += src_w - 1; 4244 } 4245 } 4246 4247 plane_state->color_plane[0].offset = offset; 4248 plane_state->color_plane[0].x = src_x; 4249 plane_state->color_plane[0].y = src_y; 4250 4251 return 0; 4252 } 4253 4254 static bool i9xx_plane_has_windowing(struct intel_plane *plane) 4255 { 4256 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4257 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4258 4259 if (IS_CHERRYVIEW(dev_priv)) 4260 return i9xx_plane == PLANE_B; 4261 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 4262 return false; 4263 else if (IS_GEN(dev_priv, 4)) 4264 return i9xx_plane == PLANE_C; 4265 else 4266 return i9xx_plane == PLANE_B || 4267 i9xx_plane == PLANE_C; 4268 } 4269 4270 static int 4271 i9xx_plane_check(struct intel_crtc_state *crtc_state, 4272 struct intel_plane_state *plane_state) 4273 { 4274 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 4275 int ret; 4276 4277 ret = chv_plane_check_rotation(plane_state); 4278 if (ret) 4279 return ret; 4280 4281 ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, 4282 &crtc_state->uapi, 4283 DRM_PLANE_HELPER_NO_SCALING, 4284 DRM_PLANE_HELPER_NO_SCALING, 4285 i9xx_plane_has_windowing(plane), 4286 true); 4287 if (ret) 4288 return ret; 4289 4290 ret = i9xx_check_plane_surface(plane_state); 4291 if (ret) 4292 return ret; 4293 4294 if (!plane_state->uapi.visible) 4295 return 0; 4296 4297 ret = intel_plane_check_src_coordinates(plane_state); 4298 if (ret) 4299 return ret; 4300 4301 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); 4302 4303 return 0; 4304 } 4305 4306 static void i9xx_update_plane(struct intel_plane *plane, 4307 const struct intel_crtc_state *crtc_state, 4308 const struct intel_plane_state *plane_state) 4309 { 4310 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4311 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4312 u32 linear_offset; 4313 int x = plane_state->color_plane[0].x; 4314 int y = 
plane_state->color_plane[0].y; 4315 int crtc_x = plane_state->uapi.dst.x1; 4316 int crtc_y = plane_state->uapi.dst.y1; 4317 int crtc_w = drm_rect_width(&plane_state->uapi.dst); 4318 int crtc_h = drm_rect_height(&plane_state->uapi.dst); 4319 unsigned long irqflags; 4320 u32 dspaddr_offset; 4321 u32 dspcntr; 4322 4323 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); 4324 4325 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 4326 4327 if (INTEL_GEN(dev_priv) >= 4) 4328 dspaddr_offset = plane_state->color_plane[0].offset; 4329 else 4330 dspaddr_offset = linear_offset; 4331 4332 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 4333 4334 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); 4335 4336 if (INTEL_GEN(dev_priv) < 4) { 4337 /* 4338 * PLANE_A doesn't actually have a full window 4339 * generator but let's assume we still need to 4340 * program whatever is there. 4341 */ 4342 I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 4343 I915_WRITE_FW(DSPSIZE(i9xx_plane), 4344 ((crtc_h - 1) << 16) | (crtc_w - 1)); 4345 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { 4346 I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); 4347 I915_WRITE_FW(PRIMSIZE(i9xx_plane), 4348 ((crtc_h - 1) << 16) | (crtc_w - 1)); 4349 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); 4350 } 4351 4352 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 4353 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x); 4354 } else if (INTEL_GEN(dev_priv) >= 4) { 4355 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset); 4356 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x); 4357 } 4358 4359 /* 4360 * The control register self-arms if the plane was previously 4361 * disabled. Try to make the plane enable atomic by writing 4362 * the control register just before the surface register. 4363 */ 4364 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 4365 if (INTEL_GEN(dev_priv) >= 4) 4366 I915_WRITE_FW(DSPSURF(i9xx_plane), 4367 intel_plane_ggtt_offset(plane_state) + 4368 dspaddr_offset); 4369 else 4370 I915_WRITE_FW(DSPADDR(i9xx_plane), 4371 intel_plane_ggtt_offset(plane_state) + 4372 dspaddr_offset); 4373 4374 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 4375 } 4376 4377 static void i9xx_disable_plane(struct intel_plane *plane, 4378 const struct intel_crtc_state *crtc_state) 4379 { 4380 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4381 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4382 unsigned long irqflags; 4383 u32 dspcntr; 4384 4385 /* 4386 * DSPCNTR pipe gamma enable on g4x+ and pipe csc 4387 * enable on ilk+ affect the pipe bottom color as 4388 * well, so we must configure them even if the plane 4389 * is disabled. 4390 * 4391 * On pre-g4x there is no way to gamma correct the 4392 * pipe bottom color but we'll keep on doing this 4393 * anyway so that the crtc state readout works correctly. 
4394 */ 4395 dspcntr = i9xx_plane_ctl_crtc(crtc_state); 4396 4397 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 4398 4399 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); 4400 if (INTEL_GEN(dev_priv) >= 4) 4401 I915_WRITE_FW(DSPSURF(i9xx_plane), 0); 4402 else 4403 I915_WRITE_FW(DSPADDR(i9xx_plane), 0); 4404 4405 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 4406 } 4407 4408 static bool i9xx_plane_get_hw_state(struct intel_plane *plane, 4409 enum pipe *pipe) 4410 { 4411 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4412 enum intel_display_power_domain power_domain; 4413 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4414 intel_wakeref_t wakeref; 4415 bool ret; 4416 u32 val; 4417 4418 /* 4419 * Not 100% correct for planes that can move between pipes, 4420 * but that's only the case for gen2-4 which don't have any 4421 * display power wells. 4422 */ 4423 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 4424 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 4425 if (!wakeref) 4426 return false; 4427 4428 val = I915_READ(DSPCNTR(i9xx_plane)); 4429 4430 ret = val & DISPLAY_PLANE_ENABLE; 4431 4432 if (INTEL_GEN(dev_priv) >= 5) 4433 *pipe = plane->pipe; 4434 else 4435 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 4436 DISPPLANE_SEL_PIPE_SHIFT; 4437 4438 intel_display_power_put(dev_priv, power_domain, wakeref); 4439 4440 return ret; 4441 } 4442 4443 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 4444 { 4445 struct drm_device *dev = intel_crtc->base.dev; 4446 struct drm_i915_private *dev_priv = to_i915(dev); 4447 4448 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 4449 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 4450 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 4451 } 4452 4453 /* 4454 * This function detaches (aka. unbinds) unused scalers in hardware 4455 */ 4456 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) 4457 { 4458 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 4459 const struct intel_crtc_scaler_state *scaler_state = 4460 &crtc_state->scaler_state; 4461 int i; 4462 4463 /* loop through and disable scalers that aren't in use */ 4464 for (i = 0; i < intel_crtc->num_scalers; i++) { 4465 if (!scaler_state->scalers[i].in_use) 4466 skl_detach_scaler(intel_crtc, i); 4467 } 4468 } 4469 4470 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, 4471 int color_plane, unsigned int rotation) 4472 { 4473 /* 4474 * The stride is either expressed as a multiple of 64 bytes chunks for 4475 * linear buffers or in number of tiles for tiled buffers. 
4476 */ 4477 if (is_surface_linear(fb, color_plane)) 4478 return 64; 4479 else if (drm_rotation_90_or_270(rotation)) 4480 return intel_tile_height(fb, color_plane); 4481 else 4482 return intel_tile_width_bytes(fb, color_plane); 4483 } 4484 4485 u32 skl_plane_stride(const struct intel_plane_state *plane_state, 4486 int color_plane) 4487 { 4488 const struct drm_framebuffer *fb = plane_state->hw.fb; 4489 unsigned int rotation = plane_state->hw.rotation; 4490 u32 stride = plane_state->color_plane[color_plane].stride; 4491 4492 if (color_plane >= fb->format->num_planes) 4493 return 0; 4494 4495 return stride / skl_plane_stride_mult(fb, color_plane, rotation); 4496 } 4497 4498 static u32 skl_plane_ctl_format(u32 pixel_format) 4499 { 4500 switch (pixel_format) { 4501 case DRM_FORMAT_C8: 4502 return PLANE_CTL_FORMAT_INDEXED; 4503 case DRM_FORMAT_RGB565: 4504 return PLANE_CTL_FORMAT_RGB_565; 4505 case DRM_FORMAT_XBGR8888: 4506 case DRM_FORMAT_ABGR8888: 4507 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 4508 case DRM_FORMAT_XRGB8888: 4509 case DRM_FORMAT_ARGB8888: 4510 return PLANE_CTL_FORMAT_XRGB_8888; 4511 case DRM_FORMAT_XBGR2101010: 4512 case DRM_FORMAT_ABGR2101010: 4513 return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; 4514 case DRM_FORMAT_XRGB2101010: 4515 case DRM_FORMAT_ARGB2101010: 4516 return PLANE_CTL_FORMAT_XRGB_2101010; 4517 case DRM_FORMAT_XBGR16161616F: 4518 case DRM_FORMAT_ABGR16161616F: 4519 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; 4520 case DRM_FORMAT_XRGB16161616F: 4521 case DRM_FORMAT_ARGB16161616F: 4522 return PLANE_CTL_FORMAT_XRGB_16161616F; 4523 case DRM_FORMAT_YUYV: 4524 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 4525 case DRM_FORMAT_YVYU: 4526 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 4527 case DRM_FORMAT_UYVY: 4528 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; 4529 case DRM_FORMAT_VYUY: 4530 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 4531 case DRM_FORMAT_NV12: 4532 return PLANE_CTL_FORMAT_NV12; 4533 case DRM_FORMAT_P010: 4534 return PLANE_CTL_FORMAT_P010; 4535 case DRM_FORMAT_P012: 4536 return PLANE_CTL_FORMAT_P012; 4537 case DRM_FORMAT_P016: 4538 return PLANE_CTL_FORMAT_P016; 4539 case DRM_FORMAT_Y210: 4540 return PLANE_CTL_FORMAT_Y210; 4541 case DRM_FORMAT_Y212: 4542 return PLANE_CTL_FORMAT_Y212; 4543 case DRM_FORMAT_Y216: 4544 return PLANE_CTL_FORMAT_Y216; 4545 case DRM_FORMAT_XVYU2101010: 4546 return PLANE_CTL_FORMAT_Y410; 4547 case DRM_FORMAT_XVYU12_16161616: 4548 return PLANE_CTL_FORMAT_Y412; 4549 case DRM_FORMAT_XVYU16161616: 4550 return PLANE_CTL_FORMAT_Y416; 4551 default: 4552 MISSING_CASE(pixel_format); 4553 } 4554 4555 return 0; 4556 } 4557 4558 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) 4559 { 4560 if (!plane_state->hw.fb->format->has_alpha) 4561 return PLANE_CTL_ALPHA_DISABLE; 4562 4563 switch (plane_state->hw.pixel_blend_mode) { 4564 case DRM_MODE_BLEND_PIXEL_NONE: 4565 return PLANE_CTL_ALPHA_DISABLE; 4566 case DRM_MODE_BLEND_PREMULTI: 4567 return PLANE_CTL_ALPHA_SW_PREMULTIPLY; 4568 case DRM_MODE_BLEND_COVERAGE: 4569 return PLANE_CTL_ALPHA_HW_PREMULTIPLY; 4570 default: 4571 MISSING_CASE(plane_state->hw.pixel_blend_mode); 4572 return PLANE_CTL_ALPHA_DISABLE; 4573 } 4574 } 4575 4576 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) 4577 { 4578 if (!plane_state->hw.fb->format->has_alpha) 4579 return PLANE_COLOR_ALPHA_DISABLE; 4580 4581 switch (plane_state->hw.pixel_blend_mode) { 4582 case 
DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_COLOR_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->hw.pixel_blend_mode);
		return PLANE_COLOR_ALPHA_DISABLE;
	}
}

static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with
	 * Xrandr, while i915 HW rotation is clockwise; that's why we swap
	 * the values here.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}

static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}

u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 plane_ctl = 0;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		return plane_ctl;

	if (crtc_state->gamma_enable)
		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;

	return plane_ctl;
}

u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |=
skl_plane_ctl_format(fb->format->format); 4700 plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 4701 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK); 4702 4703 if (INTEL_GEN(dev_priv) >= 10) 4704 plane_ctl |= cnl_plane_ctl_flip(rotation & 4705 DRM_MODE_REFLECT_MASK); 4706 4707 if (key->flags & I915_SET_COLORKEY_DESTINATION) 4708 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; 4709 else if (key->flags & I915_SET_COLORKEY_SOURCE) 4710 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 4711 4712 return plane_ctl; 4713 } 4714 4715 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) 4716 { 4717 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 4718 u32 plane_color_ctl = 0; 4719 4720 if (INTEL_GEN(dev_priv) >= 11) 4721 return plane_color_ctl; 4722 4723 if (crtc_state->gamma_enable) 4724 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; 4725 4726 if (crtc_state->csc_enable) 4727 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; 4728 4729 return plane_color_ctl; 4730 } 4731 4732 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, 4733 const struct intel_plane_state *plane_state) 4734 { 4735 struct drm_i915_private *dev_priv = 4736 to_i915(plane_state->uapi.plane->dev); 4737 const struct drm_framebuffer *fb = plane_state->hw.fb; 4738 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 4739 u32 plane_color_ctl = 0; 4740 4741 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; 4742 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); 4743 4744 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) { 4745 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) 4746 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; 4747 else 4748 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709; 4749 4750 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) 4751 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 4752 } else if (fb->format->is_yuv) { 4753 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; 4754 } 4755 4756 return plane_color_ctl; 4757 } 4758 4759 static int 4760 __intel_display_resume(struct drm_device *dev, 4761 struct drm_atomic_state *state, 4762 struct drm_modeset_acquire_ctx *ctx) 4763 { 4764 struct drm_crtc_state *crtc_state; 4765 struct drm_crtc *crtc; 4766 int i, ret; 4767 4768 intel_modeset_setup_hw_state(dev, ctx); 4769 intel_vga_redisable(to_i915(dev)); 4770 4771 if (!state) 4772 return 0; 4773 4774 /* 4775 * We've duplicated the state, pointers to the old state are invalid. 4776 * 4777 * Don't attempt to use the old state until we commit the duplicated state. 4778 */ 4779 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 4780 /* 4781 * Force recalculation even if we restore 4782 * current state. With fast modeset this may not result 4783 * in a modeset when the state is compatible. 
4784 */ 4785 crtc_state->mode_changed = true; 4786 } 4787 4788 /* ignore any reset values/BIOS leftovers in the WM registers */ 4789 if (!HAS_GMCH(to_i915(dev))) 4790 to_intel_atomic_state(state)->skip_intermediate_wm = true; 4791 4792 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 4793 4794 WARN_ON(ret == -EDEADLK); 4795 return ret; 4796 } 4797 4798 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 4799 { 4800 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && 4801 intel_has_gpu_reset(&dev_priv->gt)); 4802 } 4803 4804 void intel_prepare_reset(struct drm_i915_private *dev_priv) 4805 { 4806 struct drm_device *dev = &dev_priv->drm; 4807 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4808 struct drm_atomic_state *state; 4809 int ret; 4810 4811 /* reset doesn't touch the display */ 4812 if (!i915_modparams.force_reset_modeset_test && 4813 !gpu_reset_clobbers_display(dev_priv)) 4814 return; 4815 4816 /* We have a modeset vs reset deadlock, defensively unbreak it. */ 4817 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4818 smp_mb__after_atomic(); 4819 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); 4820 4821 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { 4822 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n"); 4823 intel_gt_set_wedged(&dev_priv->gt); 4824 } 4825 4826 /* 4827 * Need mode_config.mutex so that we don't 4828 * trample ongoing ->detect() and whatnot. 4829 */ 4830 mutex_lock(&dev->mode_config.mutex); 4831 drm_modeset_acquire_init(ctx, 0); 4832 while (1) { 4833 ret = drm_modeset_lock_all_ctx(dev, ctx); 4834 if (ret != -EDEADLK) 4835 break; 4836 4837 drm_modeset_backoff(ctx); 4838 } 4839 /* 4840 * Disabling the crtcs gracefully seems nicer. Also the 4841 * g33 docs say we should at least disable all the planes. 4842 */ 4843 state = drm_atomic_helper_duplicate_state(dev, ctx); 4844 if (IS_ERR(state)) { 4845 ret = PTR_ERR(state); 4846 DRM_ERROR("Duplicating state failed with %i\n", ret); 4847 return; 4848 } 4849 4850 ret = drm_atomic_helper_disable_all(dev, ctx); 4851 if (ret) { 4852 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 4853 drm_atomic_state_put(state); 4854 return; 4855 } 4856 4857 dev_priv->modeset_restore_state = state; 4858 state->acquire_ctx = ctx; 4859 } 4860 4861 void intel_finish_reset(struct drm_i915_private *dev_priv) 4862 { 4863 struct drm_device *dev = &dev_priv->drm; 4864 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 4865 struct drm_atomic_state *state; 4866 int ret; 4867 4868 /* reset doesn't touch the display */ 4869 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 4870 return; 4871 4872 state = fetch_and_zero(&dev_priv->modeset_restore_state); 4873 if (!state) 4874 goto unlock; 4875 4876 /* reset doesn't touch the display */ 4877 if (!gpu_reset_clobbers_display(dev_priv)) { 4878 /* for testing only restore the display */ 4879 ret = __intel_display_resume(dev, state, ctx); 4880 if (ret) 4881 DRM_ERROR("Restoring old state failed with %i\n", ret); 4882 } else { 4883 /* 4884 * The display has been reset as well, 4885 * so need a full re-initialization. 
4886 */ 4887 intel_pps_unlock_regs_wa(dev_priv); 4888 intel_modeset_init_hw(dev_priv); 4889 intel_init_clock_gating(dev_priv); 4890 4891 spin_lock_irq(&dev_priv->irq_lock); 4892 if (dev_priv->display.hpd_irq_setup) 4893 dev_priv->display.hpd_irq_setup(dev_priv); 4894 spin_unlock_irq(&dev_priv->irq_lock); 4895 4896 ret = __intel_display_resume(dev, state, ctx); 4897 if (ret) 4898 DRM_ERROR("Restoring old state failed with %i\n", ret); 4899 4900 intel_hpd_init(dev_priv); 4901 } 4902 4903 drm_atomic_state_put(state); 4904 unlock: 4905 drm_modeset_drop_locks(ctx); 4906 drm_modeset_acquire_fini(ctx); 4907 mutex_unlock(&dev->mode_config.mutex); 4908 4909 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 4910 } 4911 4912 static void icl_set_pipe_chicken(struct intel_crtc *crtc) 4913 { 4914 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4915 enum pipe pipe = crtc->pipe; 4916 u32 tmp; 4917 4918 tmp = I915_READ(PIPE_CHICKEN(pipe)); 4919 4920 /* 4921 * Display WA #1153: icl 4922 * enable hardware to bypass the alpha math 4923 * and rounding for per-pixel values 00 and 0xff 4924 */ 4925 tmp |= PER_PIXEL_ALPHA_BYPASS_EN; 4926 /* 4927 * Display WA # 1605353570: icl 4928 * Set the pixel rounding bit to 1 for allowing 4929 * passthrough of Frame buffer pixels unmodified 4930 * across pipe 4931 */ 4932 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; 4933 I915_WRITE(PIPE_CHICKEN(pipe), tmp); 4934 } 4935 4936 static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state) 4937 { 4938 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4939 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4940 u32 trans_ddi_func_ctl2_val; 4941 u8 master_select; 4942 4943 /* 4944 * Configure the master select and enable Transcoder Port Sync for 4945 * Slave CRTCs transcoder. 
4946 */ 4947 if (crtc_state->master_transcoder == INVALID_TRANSCODER) 4948 return; 4949 4950 if (crtc_state->master_transcoder == TRANSCODER_EDP) 4951 master_select = 0; 4952 else 4953 master_select = crtc_state->master_transcoder + 1; 4954 4955 /* Set the master select bits for Tranascoder Port Sync */ 4956 trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) & 4957 PORT_SYNC_MODE_MASTER_SELECT_MASK) << 4958 PORT_SYNC_MODE_MASTER_SELECT_SHIFT; 4959 /* Enable Transcoder Port Sync */ 4960 trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE; 4961 4962 I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder), 4963 trans_ddi_func_ctl2_val); 4964 } 4965 4966 static void intel_fdi_normal_train(struct intel_crtc *crtc) 4967 { 4968 struct drm_device *dev = crtc->base.dev; 4969 struct drm_i915_private *dev_priv = to_i915(dev); 4970 enum pipe pipe = crtc->pipe; 4971 i915_reg_t reg; 4972 u32 temp; 4973 4974 /* enable normal train */ 4975 reg = FDI_TX_CTL(pipe); 4976 temp = I915_READ(reg); 4977 if (IS_IVYBRIDGE(dev_priv)) { 4978 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4979 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 4980 } else { 4981 temp &= ~FDI_LINK_TRAIN_NONE; 4982 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 4983 } 4984 I915_WRITE(reg, temp); 4985 4986 reg = FDI_RX_CTL(pipe); 4987 temp = I915_READ(reg); 4988 if (HAS_PCH_CPT(dev_priv)) { 4989 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4990 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 4991 } else { 4992 temp &= ~FDI_LINK_TRAIN_NONE; 4993 temp |= FDI_LINK_TRAIN_NONE; 4994 } 4995 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 4996 4997 /* wait one idle pattern time */ 4998 POSTING_READ(reg); 4999 udelay(1000); 5000 5001 /* IVB wants error correction enabled */ 5002 if (IS_IVYBRIDGE(dev_priv)) 5003 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 5004 FDI_FE_ERRC_ENABLE); 5005 } 5006 5007 /* The FDI link training functions for ILK/Ibexpeak. 
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/*
	 * Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock
	 * bits for the train result.
	 */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");
}

static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock
	 * bits for the train result.
	 */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
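
/*
 * All three FDI trainers here share the same basic handshake (a rough
 * sketch only -- the real sequences above and below differ in
 * registers, vswing/preemphasis tables, delays and retry counts):
 *
 *	TX: enable, send training pattern 1
 *	RX: enable, expect training pattern 1
 *	poll FDI_RX_IIR until FDI_RX_BIT_LOCK is set
 *	TX/RX: switch to training pattern 2
 *	poll FDI_RX_IIR until FDI_RX_SYMBOL_LOCK is set
 *	finally intel_fdi_normal_train() switches the link over to
 *	normal pixel traffic
 */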

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/*
	 * Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock
	 * bits for the train result.
	 */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI
train done.\n"); 5359 } 5360 5361 static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) 5362 { 5363 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 5364 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 5365 enum pipe pipe = intel_crtc->pipe; 5366 i915_reg_t reg; 5367 u32 temp; 5368 5369 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 5370 reg = FDI_RX_CTL(pipe); 5371 temp = I915_READ(reg); 5372 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 5373 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 5374 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 5375 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 5376 5377 POSTING_READ(reg); 5378 udelay(200); 5379 5380 /* Switch from Rawclk to PCDclk */ 5381 temp = I915_READ(reg); 5382 I915_WRITE(reg, temp | FDI_PCDCLK); 5383 5384 POSTING_READ(reg); 5385 udelay(200); 5386 5387 /* Enable CPU FDI TX PLL, always on for Ironlake */ 5388 reg = FDI_TX_CTL(pipe); 5389 temp = I915_READ(reg); 5390 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 5391 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 5392 5393 POSTING_READ(reg); 5394 udelay(100); 5395 } 5396 } 5397 5398 static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc) 5399 { 5400 struct drm_device *dev = intel_crtc->base.dev; 5401 struct drm_i915_private *dev_priv = to_i915(dev); 5402 enum pipe pipe = intel_crtc->pipe; 5403 i915_reg_t reg; 5404 u32 temp; 5405 5406 /* Switch from PCDclk to Rawclk */ 5407 reg = FDI_RX_CTL(pipe); 5408 temp = I915_READ(reg); 5409 I915_WRITE(reg, temp & ~FDI_PCDCLK); 5410 5411 /* Disable CPU FDI TX PLL */ 5412 reg = FDI_TX_CTL(pipe); 5413 temp = I915_READ(reg); 5414 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 5415 5416 POSTING_READ(reg); 5417 udelay(100); 5418 5419 reg = FDI_RX_CTL(pipe); 5420 temp = I915_READ(reg); 5421 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 5422 5423 /* Wait for the clocks to turn off. 
	 */
	POSTING_READ(reg);
	udelay(100);
}

static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		drm_crtc_wait_one_vblank(crtc);

		return true;
	}

	return false;
}

void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/*
	 * The iCLK virtual clock root frequency is in MHz, but the
	 * adjusted_mode->crtc_clock is in kHz. To get the divisors,
	 * it is necessary to divide one by the other, so we convert
	 * the virtual clock precision to kHz here for higher precision.
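	 *
	 * Worked example (illustrative numbers only): for crtc_clock =
	 * 108000 kHz and auxdiv = 0, desired_divisor =
	 * DIV_ROUND_CLOSEST(172800 * 1000, 108000) = 1600, so
	 * divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0.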
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock, auxdiv, divsel, phasedir, phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}

static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder =
		crtc_state->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}

static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);
		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);
		break;
	default:
		BUG();
	}
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
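 *
 * With cloned outputs more than one connector in the atomic state would
 * point at this CRTC; the WARN in the body below catches that case, and
 * whichever encoder happened to be found last is returned.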
 */
static struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	int num_encoders = 0;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	WARN(num_encoders != 1, "%d encoders for pipe %c\n",
	     num_encoders, pipe_name(crtc->pipe));

	return encoder;
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ilk_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivb_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already uses the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence.
	 */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ilk_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		WARN_ON(port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		I915_WRITE(reg, temp);
	}

	ilk_enable_pch_transcoder(crtc_state);
}

static void lpt_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}

static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}

/*
 * The hardware phase 0.0 refers to the center of the pixel.
 * We want to start from the top/left edge which is phase
 * -0.5. That matches how the hardware calculates the scaling
 * factors (from top-left of the first pixel to bottom-right
 * of the last pixel, as opposed to the pixel centers).
 *
 * For 4:2:0 subsampled chroma planes we obviously have to
 * adjust that so that the chroma sample position lands in
 * the right spot.
 *
 * Note that for packed YCbCr 4:2:2 formats there is no way to
 * control chroma siting. The hardware simply replicates the
 * chroma samples for both of the luma samples, and thus we don't
 * actually get the expected MPEG2 chroma siting convention :(
 * The same behaviour is observed on pre-SKL platforms as well.
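 *
 * As a worked check of the formula implemented below (numbers are
 * illustrative): for the luma plane (sub = 1, not cosited) at a 4:1
 * downscale, scale = 4.0 in .16 fixed point = 0x40000, so
 * phase = -0x8000 + 0x40000 / 2 = 0x18000, i.e. exactly the 1.5
 * initial phase shown in the downscaling diagram below.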
 *
 * Theory behind the formula (note that we ignore sub-pixel
 * source coordinates):
 * s = source sample position
 * d = destination sample position
 *
 * Downscaling 4:1:
 * -0.5
 * | 0.0
 * | |     1.5 (initial phase)
 * | |     |
 * v v     v
 * | s | s | s | s |
 * |       d       |
 *
 * Upscaling 1:4:
 * -0.5
 * | -0.375 (initial phase)
 * | |     0.0
 * | |     |
 * v v     v
 * |       s       |
 * | d | d | d | d |
 */
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
{
	int phase = -0x8000;
	u16 trip = 0;

	if (chroma_cosited)
		phase += (sub - 1) * 0x8000 / sub;

	phase += scale / (2 * sub);

	/*
	 * Hardware initial phase limited to [-0.5:1.5].
	 * Since the max hardware scale factor is 3.0, we
	 * should never actually exceed 1.0 here.
	 */
	WARN_ON(phase < -0x8000 || phase > 0x18000);

	if (phase < 0)
		phase = 0x10000 + phase;
	else
		trip = PS_PHASE_TRIP;

	return ((phase >> 2) & PS_PHASE_MASK) | trip;
}

#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16

static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * If the plane is being disabled, the scaler is no longer required,
	 * or we are force detaching:
	 * - free the scaler bound to this plane/crtc
	 * - in order to do this, update crtc->scaler_usage
	 *
	 * Here the scaler state in crtc_state is set free so that
	 * the scaler can be assigned to another user. The actual register
	 * update that frees the scaler is done later, in the plane/panel-fit
	 * programming; here we only update the bookkeeping and reset the
	 * staged scaler_id.
5979 */ 5980 if (force_detach || !need_scaler) { 5981 if (*scaler_id >= 0) { 5982 scaler_state->scaler_users &= ~(1 << scaler_user); 5983 scaler_state->scalers[*scaler_id].in_use = 0; 5984 5985 DRM_DEBUG_KMS("scaler_user index %u.%u: " 5986 "Staged freeing scaler id %d scaler_users = 0x%x\n", 5987 intel_crtc->pipe, scaler_user, *scaler_id, 5988 scaler_state->scaler_users); 5989 *scaler_id = -1; 5990 } 5991 return 0; 5992 } 5993 5994 if (format && intel_format_info_is_yuv_semiplanar(format, modifier) && 5995 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { 5996 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); 5997 return -EINVAL; 5998 } 5999 6000 /* range checks */ 6001 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 6002 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 6003 (INTEL_GEN(dev_priv) >= 11 && 6004 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || 6005 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || 6006 (INTEL_GEN(dev_priv) < 11 && 6007 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 6008 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { 6009 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " 6010 "size is out of scaler range\n", 6011 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); 6012 return -EINVAL; 6013 } 6014 6015 /* mark this plane as a scaler user in crtc_state */ 6016 scaler_state->scaler_users |= (1 << scaler_user); 6017 DRM_DEBUG_KMS("scaler_user index %u.%u: " 6018 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", 6019 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, 6020 scaler_state->scaler_users); 6021 6022 return 0; 6023 } 6024 6025 /** 6026 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc. 6027 * 6028 * @state: crtc's scaler state 6029 * 6030 * Return 6031 * 0 - scaler_usage updated successfully 6032 * error - requested scaling cannot be supported or other error condition 6033 */ 6034 int skl_update_scaler_crtc(struct intel_crtc_state *state) 6035 { 6036 const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode; 6037 bool need_scaler = false; 6038 6039 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 6040 need_scaler = true; 6041 6042 return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX, 6043 &state->scaler_state.scaler_id, 6044 state->pipe_src_w, state->pipe_src_h, 6045 adjusted_mode->crtc_hdisplay, 6046 adjusted_mode->crtc_vdisplay, NULL, 0, 6047 need_scaler); 6048 } 6049 6050 /** 6051 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 6052 * @crtc_state: crtc's scaler state 6053 * @plane_state: atomic plane state to update 6054 * 6055 * Return 6056 * 0 - scaler_usage updated successfully 6057 * error - requested scaling cannot be supported or other error condition 6058 */ 6059 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 6060 struct intel_plane_state *plane_state) 6061 { 6062 struct intel_plane *intel_plane = 6063 to_intel_plane(plane_state->uapi.plane); 6064 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 6065 struct drm_framebuffer *fb = plane_state->hw.fb; 6066 int ret; 6067 bool force_detach = !fb || !plane_state->uapi.visible; 6068 bool need_scaler = false; 6069 6070 /* Pre-gen11 and SDR planes always need a scaler for planar formats. 
	 */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		need_scaler = true;

	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->uapi.src) >> 16,
				drm_rect_height(&plane_state->uapi.src) >> 16,
				drm_rect_width(&plane_state->uapi.dst),
				drm_rect_height(&plane_state->uapi.dst),
				fb ? fb->format : NULL,
				fb ? fb->modifier : 0,
				need_scaler);

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed\n",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_XVYU2101010:
	case DRM_FORMAT_XVYU12_16161616:
	case DRM_FORMAT_XVYU16161616:
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		if (INTEL_GEN(dev_priv) >= 11)
			break;
		/* fall through */
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}

void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(crtc, i);
}

static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			   PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe,
					    id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (crtc_state->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
				   PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
	}
}

void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank.
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "it's not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}

void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane.
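	 * (IPS requires at least one plane besides the cursor to stay
	 * enabled while it is active, and the disable above only takes
	 * effect at the next vblank, so the plane must stay up until
	 * then.)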
	 */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay)
		(void) intel_overlay_switch_off(intel_crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
				       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->ips_enabled)
		return false;

	if (needs_modeset(new_crtc_state))
		return true;

	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Disable IPS before we program the LUT.
	 */
	if (IS_HASWELL(dev_priv) &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
		return true;

	return !new_crtc_state->ips_enabled;
}

static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
				       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!new_crtc_state->ips_enabled)
		return false;

	if (needs_modeset(new_crtc_state))
		return true;

	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Re-enable IPS after the LUT has been programmed.
	 */
	if (IS_HASWELL(dev_priv) &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
		return true;

	/*
	 * We can't read out IPS on broadwell, assume the worst and
	 * forcibly enable IPS on the first fastset.
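	 * (I915_MODE_FLAG_INHERITED marks state taken over from the
	 * BIOS/boot firmware, i.e. the only state whose ips_enabled
	 * value we cannot trust.)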
	 */
	if (new_crtc_state->update_pipe &&
	    old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
		return true;

	return !old_crtc_state->ips_enabled;
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl */
	if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
		return true;

	return false;
}

static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
		new_crtc_state->active_planes;
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->active_planes &&
		(!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
}

static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *new_primary_state =
		intel_atomic_get_new_plane_state(state, primary);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	if (new_primary_state)
		intel_fbc_post_update(crtc);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}

static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *new_primary_state =
		intel_atomic_get_new_plane_state(state, primary);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	if (new_primary_state &&
	    intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
		intel_wait_for_vblank(dev_priv, pipe);
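
	/*
	 * The two workaround toggles below are symmetric with
	 * intel_post_plane_update(): they are switched on here as soon as
	 * the new state needs them, and switched off there once the old
	 * state no longer does.
	 */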
	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, first
	 * disable the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
6500 */ 6501 if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state)) 6502 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6503 } 6504 6505 static void intel_crtc_disable_planes(struct intel_atomic_state *state, 6506 struct intel_crtc *crtc) 6507 { 6508 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6509 const struct intel_crtc_state *new_crtc_state = 6510 intel_atomic_get_new_crtc_state(state, crtc); 6511 unsigned int update_mask = new_crtc_state->update_planes; 6512 const struct intel_plane_state *old_plane_state; 6513 struct intel_plane *plane; 6514 unsigned fb_bits = 0; 6515 int i; 6516 6517 intel_crtc_dpms_overlay_disable(crtc); 6518 6519 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 6520 if (crtc->pipe != plane->pipe || 6521 !(update_mask & BIT(plane->id))) 6522 continue; 6523 6524 intel_disable_plane(plane, new_crtc_state); 6525 6526 if (old_plane_state->uapi.visible) 6527 fb_bits |= plane->frontbuffer_bit; 6528 } 6529 6530 intel_frontbuffer_flip(dev_priv, fb_bits); 6531 } 6532 6533 /* 6534 * intel_connector_primary_encoder - get the primary encoder for a connector 6535 * @connector: connector for which to return the encoder 6536 * 6537 * Returns the primary encoder for a connector. There is a 1:1 mapping from 6538 * all connectors to their encoder, except for DP-MST connectors which have 6539 * both a virtual and a primary encoder. These DP-MST primary encoders can be 6540 * pointed to by as many DP-MST connectors as there are pipes. 6541 */ 6542 static struct intel_encoder * 6543 intel_connector_primary_encoder(struct intel_connector *connector) 6544 { 6545 struct intel_encoder *encoder; 6546 6547 if (connector->mst_port) 6548 return &dp_to_dig_port(connector->mst_port)->base; 6549 6550 encoder = intel_attached_encoder(connector); 6551 WARN_ON(!encoder); 6552 6553 return encoder; 6554 } 6555 6556 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 6557 { 6558 struct drm_connector_state *new_conn_state; 6559 struct drm_connector *connector; 6560 int i; 6561 6562 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 6563 i) { 6564 struct intel_connector *intel_connector; 6565 struct intel_encoder *encoder; 6566 struct intel_crtc *crtc; 6567 6568 if (!intel_connector_needs_modeset(state, connector)) 6569 continue; 6570 6571 intel_connector = to_intel_connector(connector); 6572 encoder = intel_connector_primary_encoder(intel_connector); 6573 if (!encoder->update_prepare) 6574 continue; 6575 6576 crtc = new_conn_state->crtc ? 6577 to_intel_crtc(new_conn_state->crtc) : NULL; 6578 encoder->update_prepare(state, encoder, crtc); 6579 } 6580 } 6581 6582 static void intel_encoders_update_complete(struct intel_atomic_state *state) 6583 { 6584 struct drm_connector_state *new_conn_state; 6585 struct drm_connector *connector; 6586 int i; 6587 6588 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 6589 i) { 6590 struct intel_connector *intel_connector; 6591 struct intel_encoder *encoder; 6592 struct intel_crtc *crtc; 6593 6594 if (!intel_connector_needs_modeset(state, connector)) 6595 continue; 6596 6597 intel_connector = to_intel_connector(connector); 6598 encoder = intel_connector_primary_encoder(intel_connector); 6599 if (!encoder->update_complete) 6600 continue; 6601 6602 crtc = new_conn_state->crtc ? 
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_complete(state, encoder, crtc);
	}
}

static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(encoder, crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(encoder, old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
	}
}

static void
intel_encoders_post_pll_disable(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(encoder, crtc_state, conn_state);
	}
}

static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);

	plane->disable_plane(plane, crtc_state);
}

static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling.
		 */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}

static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
					    enum pipe pipe, bool apply)
{
	u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	if (apply)
		val |= mask;
	else
		val &= ~mask;

	I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
}

static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = MBUS_DBOX_A_CREDIT(2);

	if (INTEL_GEN(dev_priv) >= 12) {
		val |= MBUS_DBOX_BW_CREDIT(2);
		val |= MBUS_DBOX_B_CREDIT(12);
	} else {
		val |= MBUS_DBOX_BW_CREDIT(1);
		val |= MBUS_DBOX_B_CREDIT(8);
	}

	I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
}

static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
	u32 val;

	val = I915_READ(reg);
	val &= ~HSW_FRAME_START_DELAY_MASK;
	val |= HSW_FRAME_START_DELAY(0);
	I915_WRITE(reg, val);
}

static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (WARN_ON(crtc->active))
		return;

	intel_encoders_pre_pll_enable(state,
				      crtc);

	if (new_crtc_state->shared_dpll)
		intel_enable_shared_dpll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_enable_trans_port_sync(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder))
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   new_crtc_state->pixel_multiplier - 1);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	if (!transcoder_is_dsi(cpu_transcoder)) {
		hsw_set_frame_start_delay(new_crtc_state);
		hsw_set_pipeconf(new_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		lpt_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}

void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right.
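	 * (get_crtc_power_domains() only claims the panel fitter power
	 * domain while the pfit is in use, so the registers may not even
	 * be accessible here otherwise.)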
	 */
	if (old_crtc_state->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}

static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}

static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging.
	 */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;

	if (IS_ELKHARTLAKE(dev_priv))
		return phy <= PHY_C;

	if (INTEL_GEN(dev_priv) >= 11)
		return phy <= PHY_B;

	return false;
}

bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (INTEL_GEN(dev_priv) >= 12)
		return phy >= PHY_D && phy <= PHY_I;

	if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
		return phy >= PHY_C && phy <= PHY_F;

	return false;
}

enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
	if (IS_ELKHARTLAKE(i915) && port == PORT_D)
		return PHY_A;

	return (enum phy)port;
}

enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
		return PORT_TC_NONE;

	if (INTEL_GEN(dev_priv) >= 12)
		return port - PORT_D;

	return port - PORT_C;
}

enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}

static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |=
BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder)); 7250 if (crtc_state->pch_pfit.enabled || 7251 crtc_state->pch_pfit.force_thru) 7252 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 7253 7254 drm_for_each_encoder_mask(encoder, &dev_priv->drm, 7255 crtc_state->uapi.encoder_mask) { 7256 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 7257 7258 mask |= BIT_ULL(intel_encoder->power_domain); 7259 } 7260 7261 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 7262 mask |= BIT_ULL(POWER_DOMAIN_AUDIO); 7263 7264 if (crtc_state->shared_dpll) 7265 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE); 7266 7267 return mask; 7268 } 7269 7270 static u64 7271 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) 7272 { 7273 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7274 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7275 enum intel_display_power_domain domain; 7276 u64 domains, new_domains, old_domains; 7277 7278 old_domains = crtc->enabled_power_domains; 7279 crtc->enabled_power_domains = new_domains = 7280 get_crtc_power_domains(crtc_state); 7281 7282 domains = new_domains & ~old_domains; 7283 7284 for_each_power_domain(domain, domains) 7285 intel_display_power_get(dev_priv, domain); 7286 7287 return old_domains & ~new_domains; 7288 } 7289 7290 static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 7291 u64 domains) 7292 { 7293 enum intel_display_power_domain domain; 7294 7295 for_each_power_domain(domain, domains) 7296 intel_display_power_put_unchecked(dev_priv, domain); 7297 } 7298 7299 static void valleyview_crtc_enable(struct intel_atomic_state *state, 7300 struct intel_crtc *crtc) 7301 { 7302 const struct intel_crtc_state *new_crtc_state = 7303 intel_atomic_get_new_crtc_state(state, crtc); 7304 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7305 enum pipe pipe = crtc->pipe; 7306 7307 if (WARN_ON(crtc->active)) 7308 return; 7309 7310 if (intel_crtc_has_dp_encoder(new_crtc_state)) 7311 intel_dp_set_m_n(new_crtc_state, M1_N1); 7312 7313 intel_set_pipe_timings(new_crtc_state); 7314 intel_set_pipe_src_size(new_crtc_state); 7315 7316 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 7317 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 7318 I915_WRITE(CHV_CANVAS(pipe), 0); 7319 } 7320 7321 i9xx_set_pipeconf(new_crtc_state); 7322 7323 crtc->active = true; 7324 7325 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 7326 7327 intel_encoders_pre_pll_enable(state, crtc); 7328 7329 if (IS_CHERRYVIEW(dev_priv)) { 7330 chv_prepare_pll(crtc, new_crtc_state); 7331 chv_enable_pll(crtc, new_crtc_state); 7332 } else { 7333 vlv_prepare_pll(crtc, new_crtc_state); 7334 vlv_enable_pll(crtc, new_crtc_state); 7335 } 7336 7337 intel_encoders_pre_enable(state, crtc); 7338 7339 i9xx_pfit_enable(new_crtc_state); 7340 7341 intel_color_load_luts(new_crtc_state); 7342 intel_color_commit(new_crtc_state); 7343 /* update DSPCNTR to configure gamma for pipe bottom color */ 7344 intel_disable_primary_plane(new_crtc_state); 7345 7346 dev_priv->display.initial_watermarks(state, crtc); 7347 intel_enable_pipe(new_crtc_state); 7348 7349 intel_crtc_vblank_on(new_crtc_state); 7350 7351 intel_encoders_enable(state, crtc); 7352 } 7353 7354 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) 7355 { 7356 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7357 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7358 7359 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0); 7360 
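/*
 * Note: FP1 below carries the reduced-clock dividers;
 * i9xx_update_pll_dividers() sets fp1 == fp0 when no LVDS
 * downclocking is in use.
 */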
I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1); 7361 } 7362 7363 static void i9xx_crtc_enable(struct intel_atomic_state *state, 7364 struct intel_crtc *crtc) 7365 { 7366 const struct intel_crtc_state *new_crtc_state = 7367 intel_atomic_get_new_crtc_state(state, crtc); 7368 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7369 enum pipe pipe = crtc->pipe; 7370 7371 if (WARN_ON(crtc->active)) 7372 return; 7373 7374 i9xx_set_pll_dividers(new_crtc_state); 7375 7376 if (intel_crtc_has_dp_encoder(new_crtc_state)) 7377 intel_dp_set_m_n(new_crtc_state, M1_N1); 7378 7379 intel_set_pipe_timings(new_crtc_state); 7380 intel_set_pipe_src_size(new_crtc_state); 7381 7382 i9xx_set_pipeconf(new_crtc_state); 7383 7384 crtc->active = true; 7385 7386 if (!IS_GEN(dev_priv, 2)) 7387 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 7388 7389 intel_encoders_pre_enable(state, crtc); 7390 7391 i9xx_enable_pll(crtc, new_crtc_state); 7392 7393 i9xx_pfit_enable(new_crtc_state); 7394 7395 intel_color_load_luts(new_crtc_state); 7396 intel_color_commit(new_crtc_state); 7397 /* update DSPCNTR to configure gamma for pipe bottom color */ 7398 intel_disable_primary_plane(new_crtc_state); 7399 7400 if (dev_priv->display.initial_watermarks) 7401 dev_priv->display.initial_watermarks(state, crtc); 7402 else 7403 intel_update_watermarks(crtc); 7404 intel_enable_pipe(new_crtc_state); 7405 7406 intel_crtc_vblank_on(new_crtc_state); 7407 7408 intel_encoders_enable(state, crtc); 7409 } 7410 7411 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 7412 { 7413 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 7414 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7415 7416 if (!old_crtc_state->gmch_pfit.control) 7417 return; 7418 7419 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 7420 7421 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n", 7422 I915_READ(PFIT_CONTROL)); 7423 I915_WRITE(PFIT_CONTROL, 0); 7424 } 7425 7426 static void i9xx_crtc_disable(struct intel_atomic_state *state, 7427 struct intel_crtc *crtc) 7428 { 7429 struct intel_crtc_state *old_crtc_state = 7430 intel_atomic_get_old_crtc_state(state, crtc); 7431 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7432 enum pipe pipe = crtc->pipe; 7433 7434 /* 7435 * On gen2 planes are double buffered but the pipe isn't, so we must 7436 * wait for planes to fully turn off before disabling the pipe. 
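 * (The plane disable only latches at the next vblank, while the pipe
 * disable bit takes effect immediately, hence the extra vblank wait
 * below.)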
7437 */ 7438 if (IS_GEN(dev_priv, 2)) 7439 intel_wait_for_vblank(dev_priv, pipe); 7440 7441 intel_encoders_disable(state, crtc); 7442 7443 intel_crtc_vblank_off(old_crtc_state); 7444 7445 intel_disable_pipe(old_crtc_state); 7446 7447 i9xx_pfit_disable(old_crtc_state); 7448 7449 intel_encoders_post_disable(state, crtc); 7450 7451 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 7452 if (IS_CHERRYVIEW(dev_priv)) 7453 chv_disable_pll(dev_priv, pipe); 7454 else if (IS_VALLEYVIEW(dev_priv)) 7455 vlv_disable_pll(dev_priv, pipe); 7456 else 7457 i9xx_disable_pll(old_crtc_state); 7458 } 7459 7460 intel_encoders_post_pll_disable(state, crtc); 7461 7462 if (!IS_GEN(dev_priv, 2)) 7463 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 7464 7465 if (!dev_priv->display.initial_watermarks) 7466 intel_update_watermarks(crtc); 7467 7468 /* clock the pipe down to 640x480@60 to potentially save power */ 7469 if (IS_I830(dev_priv)) 7470 i830_enable_pipe(dev_priv, pipe); 7471 } 7472 7473 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, 7474 struct drm_modeset_acquire_ctx *ctx) 7475 { 7476 struct intel_encoder *encoder; 7477 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7478 struct intel_bw_state *bw_state = 7479 to_intel_bw_state(dev_priv->bw_obj.state); 7480 struct intel_crtc_state *crtc_state = 7481 to_intel_crtc_state(crtc->base.state); 7482 enum intel_display_power_domain domain; 7483 struct intel_plane *plane; 7484 struct drm_atomic_state *state; 7485 struct intel_crtc_state *temp_crtc_state; 7486 enum pipe pipe = crtc->pipe; 7487 u64 domains; 7488 int ret; 7489 7490 if (!crtc_state->hw.active) 7491 return; 7492 7493 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 7494 const struct intel_plane_state *plane_state = 7495 to_intel_plane_state(plane->base.state); 7496 7497 if (plane_state->uapi.visible) 7498 intel_plane_disable_noatomic(crtc, plane); 7499 } 7500 7501 state = drm_atomic_state_alloc(&dev_priv->drm); 7502 if (!state) { 7503 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", 7504 crtc->base.base.id, crtc->base.name); 7505 return; 7506 } 7507 7508 state->acquire_ctx = ctx; 7509 7510 /* Everything's already locked, -EDEADLK can't happen. 
*/
7511 temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
7512 ret = drm_atomic_add_affected_connectors(state, &crtc->base);
7513
7514 WARN_ON(IS_ERR(temp_crtc_state) || ret);
7515
7516 dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
7517
7518 drm_atomic_state_put(state);
7519
7520 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
7521 crtc->base.base.id, crtc->base.name);
7522
7523 crtc->active = false;
7524 crtc->base.enabled = false;
7525
7526 WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
7527 crtc_state->uapi.active = false;
7528 crtc_state->uapi.connector_mask = 0;
7529 crtc_state->uapi.encoder_mask = 0;
7530 intel_crtc_free_hw_state(crtc_state);
7531 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
7532
7533 for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
7534 encoder->base.crtc = NULL;
7535
7536 intel_fbc_disable(crtc);
7537 intel_update_watermarks(crtc);
7538 intel_disable_shared_dpll(crtc_state);
7539
7540 domains = crtc->enabled_power_domains;
7541 for_each_power_domain(domain, domains)
7542 intel_display_power_put_unchecked(dev_priv, domain);
7543 crtc->enabled_power_domains = 0;
7544
7545 dev_priv->active_pipes &= ~BIT(pipe);
7546 dev_priv->min_cdclk[pipe] = 0;
7547 dev_priv->min_voltage_level[pipe] = 0;
7548
7549 bw_state->data_rate[pipe] = 0;
7550 bw_state->num_active_planes[pipe] = 0;
7551 }
7552
7553 /*
7554 * Turn all CRTCs off, but do not adjust state.
7555 * This has to be paired with a call to intel_modeset_setup_hw_state.
7556 */
7557 int intel_display_suspend(struct drm_device *dev)
7558 {
7559 struct drm_i915_private *dev_priv = to_i915(dev);
7560 struct drm_atomic_state *state;
7561 int ret;
7562
7563 state = drm_atomic_helper_suspend(dev);
7564 ret = PTR_ERR_OR_ZERO(state);
7565 if (ret)
7566 DRM_ERROR("Suspending CRTCs failed with %i\n", ret);
7567 else
7568 dev_priv->modeset_restore_state = state;
7569 return ret;
7570 }
7571
7572 void intel_encoder_destroy(struct drm_encoder *encoder)
7573 {
7574 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7575
7576 drm_encoder_cleanup(encoder);
7577 kfree(intel_encoder);
7578 }
7579
7580 /* Cross check the actual hw state with our own modeset state tracking (and its
7581 * internal consistency).
*/ 7582 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, 7583 struct drm_connector_state *conn_state) 7584 { 7585 struct intel_connector *connector = to_intel_connector(conn_state->connector); 7586 7587 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 7588 connector->base.base.id, 7589 connector->base.name); 7590 7591 if (connector->get_hw_state(connector)) { 7592 struct intel_encoder *encoder = connector->encoder; 7593 7594 I915_STATE_WARN(!crtc_state, 7595 "connector enabled without attached crtc\n"); 7596 7597 if (!crtc_state) 7598 return; 7599 7600 I915_STATE_WARN(!crtc_state->hw.active, 7601 "connector is active, but attached crtc isn't\n"); 7602 7603 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 7604 return; 7605 7606 I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 7607 "atomic encoder doesn't match attached encoder\n"); 7608 7609 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 7610 "attached encoder crtc differs from connector crtc\n"); 7611 } else { 7612 I915_STATE_WARN(crtc_state && crtc_state->hw.active, 7613 "attached crtc is active, but connector isn't\n"); 7614 I915_STATE_WARN(!crtc_state && conn_state->best_encoder, 7615 "best encoder set without crtc!\n"); 7616 } 7617 } 7618 7619 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 7620 { 7621 if (crtc_state->hw.enable && crtc_state->has_pch_encoder) 7622 return crtc_state->fdi_lanes; 7623 7624 return 0; 7625 } 7626 7627 static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, 7628 struct intel_crtc_state *pipe_config) 7629 { 7630 struct drm_i915_private *dev_priv = to_i915(dev); 7631 struct drm_atomic_state *state = pipe_config->uapi.state; 7632 struct intel_crtc *other_crtc; 7633 struct intel_crtc_state *other_crtc_state; 7634 7635 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", 7636 pipe_name(pipe), pipe_config->fdi_lanes); 7637 if (pipe_config->fdi_lanes > 4) { 7638 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", 7639 pipe_name(pipe), pipe_config->fdi_lanes); 7640 return -EINVAL; 7641 } 7642 7643 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 7644 if (pipe_config->fdi_lanes > 2) { 7645 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 7646 pipe_config->fdi_lanes); 7647 return -EINVAL; 7648 } else { 7649 return 0; 7650 } 7651 } 7652 7653 if (INTEL_NUM_PIPES(dev_priv) == 2) 7654 return 0; 7655 7656 /* Ivybridge 3 pipe is really complicated */ 7657 switch (pipe) { 7658 case PIPE_A: 7659 return 0; 7660 case PIPE_B: 7661 if (pipe_config->fdi_lanes <= 2) 7662 return 0; 7663 7664 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); 7665 other_crtc_state = 7666 intel_atomic_get_crtc_state(state, other_crtc); 7667 if (IS_ERR(other_crtc_state)) 7668 return PTR_ERR(other_crtc_state); 7669 7670 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 7671 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", 7672 pipe_name(pipe), pipe_config->fdi_lanes); 7673 return -EINVAL; 7674 } 7675 return 0; 7676 case PIPE_C: 7677 if (pipe_config->fdi_lanes > 2) { 7678 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 7679 pipe_name(pipe), pipe_config->fdi_lanes); 7680 return -EINVAL; 7681 } 7682 7683 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); 7684 other_crtc_state = 7685 intel_atomic_get_crtc_state(state, other_crtc); 7686 if (IS_ERR(other_crtc_state)) 7687 return PTR_ERR(other_crtc_state); 7688 7689 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 7690 DRM_DEBUG_KMS("fdi 
link B uses too many lanes to enable link C\n"); 7691 return -EINVAL; 7692 } 7693 return 0; 7694 default: 7695 BUG(); 7696 } 7697 } 7698 7699 #define RETRY 1 7700 static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc, 7701 struct intel_crtc_state *pipe_config) 7702 { 7703 struct drm_device *dev = intel_crtc->base.dev; 7704 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 7705 int lane, link_bw, fdi_dotclock, ret; 7706 bool needs_recompute = false; 7707 7708 retry: 7709 /* FDI is a binary signal running at ~2.7GHz, encoding 7710 * each output octet as 10 bits. The actual frequency 7711 * is stored as a divider into a 100MHz clock, and the 7712 * mode pixel clock is stored in units of 1KHz. 7713 * Hence the bw of each lane in terms of the mode signal 7714 * is: 7715 */ 7716 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config); 7717 7718 fdi_dotclock = adjusted_mode->crtc_clock; 7719 7720 lane = ilk_get_lanes_required(fdi_dotclock, link_bw, 7721 pipe_config->pipe_bpp); 7722 7723 pipe_config->fdi_lanes = lane; 7724 7725 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 7726 link_bw, &pipe_config->fdi_m_n, false, false); 7727 7728 ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 7729 if (ret == -EDEADLK) 7730 return ret; 7731 7732 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 7733 pipe_config->pipe_bpp -= 2*3; 7734 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 7735 pipe_config->pipe_bpp); 7736 needs_recompute = true; 7737 pipe_config->bw_constrained = true; 7738 7739 goto retry; 7740 } 7741 7742 if (needs_recompute) 7743 return RETRY; 7744 7745 return ret; 7746 } 7747 7748 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) 7749 { 7750 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7751 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7752 7753 /* IPS only exists on ULT machines and is tied to pipe A. */ 7754 if (!hsw_crtc_supports_ips(crtc)) 7755 return false; 7756 7757 if (!i915_modparams.enable_ips) 7758 return false; 7759 7760 if (crtc_state->pipe_bpp > 24) 7761 return false; 7762 7763 /* 7764 * We compare against max which means we must take 7765 * the increased cdclk requirement into account when 7766 * calculating the new cdclk. 7767 * 7768 * Should measure whether using a lower cdclk w/o IPS 7769 */ 7770 if (IS_BROADWELL(dev_priv) && 7771 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100) 7772 return false; 7773 7774 return true; 7775 } 7776 7777 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state) 7778 { 7779 struct drm_i915_private *dev_priv = 7780 to_i915(crtc_state->uapi.crtc->dev); 7781 struct intel_atomic_state *intel_state = 7782 to_intel_atomic_state(crtc_state->uapi.state); 7783 7784 if (!hsw_crtc_state_ips_capable(crtc_state)) 7785 return false; 7786 7787 /* 7788 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 7789 * enabled and disabled dynamically based on package C states, 7790 * user space can't make reliable use of the CRCs, so let's just 7791 * completely disable it. 7792 */ 7793 if (crtc_state->crc_enabled) 7794 return false; 7795 7796 /* IPS should be fine as long as at least one plane is enabled. 
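 *
 * Illustrative example: with only the cursor plane enabled,
 * active_planes == BIT(PLANE_CURSOR), so the expression below is 0 and
 * IPS stays off; any enabled primary/sprite plane makes it non-zero.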
*/ 7797 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) 7798 return false; 7799 7800 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 7801 if (IS_BROADWELL(dev_priv) && 7802 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100) 7803 return false; 7804 7805 return true; 7806 } 7807 7808 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 7809 { 7810 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7811 7812 /* GDG double wide on either pipe, otherwise pipe A only */ 7813 return INTEL_GEN(dev_priv) < 4 && 7814 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 7815 } 7816 7817 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) 7818 { 7819 u32 pixel_rate; 7820 7821 pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock; 7822 7823 /* 7824 * We only use IF-ID interlacing. If we ever use 7825 * PF-ID we'll need to adjust the pixel_rate here. 7826 */ 7827 7828 if (pipe_config->pch_pfit.enabled) { 7829 u64 pipe_w, pipe_h, pfit_w, pfit_h; 7830 u32 pfit_size = pipe_config->pch_pfit.size; 7831 7832 pipe_w = pipe_config->pipe_src_w; 7833 pipe_h = pipe_config->pipe_src_h; 7834 7835 pfit_w = (pfit_size >> 16) & 0xFFFF; 7836 pfit_h = pfit_size & 0xFFFF; 7837 if (pipe_w < pfit_w) 7838 pipe_w = pfit_w; 7839 if (pipe_h < pfit_h) 7840 pipe_h = pfit_h; 7841 7842 if (WARN_ON(!pfit_w || !pfit_h)) 7843 return pixel_rate; 7844 7845 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h), 7846 pfit_w * pfit_h); 7847 } 7848 7849 return pixel_rate; 7850 } 7851 7852 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 7853 { 7854 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 7855 7856 if (HAS_GMCH(dev_priv)) 7857 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 7858 crtc_state->pixel_rate = 7859 crtc_state->hw.adjusted_mode.crtc_clock; 7860 else 7861 crtc_state->pixel_rate = 7862 ilk_pipe_pixel_rate(crtc_state); 7863 } 7864 7865 static int intel_crtc_compute_config(struct intel_crtc *crtc, 7866 struct intel_crtc_state *pipe_config) 7867 { 7868 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7869 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 7870 int clock_limit = dev_priv->max_dotclk_freq; 7871 7872 if (INTEL_GEN(dev_priv) < 4) { 7873 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 7874 7875 /* 7876 * Enable double wide mode when the dot clock 7877 * is > 90% of the (display) core speed. 7878 */ 7879 if (intel_crtc_supports_double_wide(crtc) && 7880 adjusted_mode->crtc_clock > clock_limit) { 7881 clock_limit = dev_priv->max_dotclk_freq; 7882 pipe_config->double_wide = true; 7883 } 7884 } 7885 7886 if (adjusted_mode->crtc_clock > clock_limit) { 7887 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 7888 adjusted_mode->crtc_clock, clock_limit, 7889 yesno(pipe_config->double_wide)); 7890 return -EINVAL; 7891 } 7892 7893 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 7894 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) && 7895 pipe_config->hw.ctm) { 7896 /* 7897 * There is only one pipe CSC unit per pipe, and we need that 7898 * for output conversion from RGB->YCBCR. So if CTM is already 7899 * applied we can't support YCBCR420 output. 
7900 */ 7901 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n"); 7902 return -EINVAL; 7903 } 7904 7905 /* 7906 * Pipe horizontal size must be even in: 7907 * - DVO ganged mode 7908 * - LVDS dual channel mode 7909 * - Double wide pipe 7910 */ 7911 if (pipe_config->pipe_src_w & 1) { 7912 if (pipe_config->double_wide) { 7913 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n"); 7914 return -EINVAL; 7915 } 7916 7917 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 7918 intel_is_dual_link_lvds(dev_priv)) { 7919 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n"); 7920 return -EINVAL; 7921 } 7922 } 7923 7924 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 7925 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 7926 */ 7927 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && 7928 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 7929 return -EINVAL; 7930 7931 intel_crtc_compute_pixel_rate(pipe_config); 7932 7933 if (pipe_config->has_pch_encoder) 7934 return ilk_fdi_compute_config(crtc, pipe_config); 7935 7936 return 0; 7937 } 7938 7939 static void 7940 intel_reduce_m_n_ratio(u32 *num, u32 *den) 7941 { 7942 while (*num > DATA_LINK_M_N_MASK || 7943 *den > DATA_LINK_M_N_MASK) { 7944 *num >>= 1; 7945 *den >>= 1; 7946 } 7947 } 7948 7949 static void compute_m_n(unsigned int m, unsigned int n, 7950 u32 *ret_m, u32 *ret_n, 7951 bool constant_n) 7952 { 7953 /* 7954 * Several DP dongles in particular seem to be fussy about 7955 * too large link M/N values. Give N value as 0x8000 that 7956 * should be acceptable by specific devices. 0x8000 is the 7957 * specified fixed N value for asynchronous clock mode, 7958 * which the devices expect also in synchronous clock mode. 7959 */ 7960 if (constant_n) 7961 *ret_n = 0x8000; 7962 else 7963 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 7964 7965 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 7966 intel_reduce_m_n_ratio(ret_m, ret_n); 7967 } 7968 7969 void 7970 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 7971 int pixel_clock, int link_clock, 7972 struct intel_link_m_n *m_n, 7973 bool constant_n, bool fec_enable) 7974 { 7975 u32 data_clock = bits_per_pixel * pixel_clock; 7976 7977 if (fec_enable) 7978 data_clock = intel_dp_mode_to_fec_clock(data_clock); 7979 7980 m_n->tu = 64; 7981 compute_m_n(data_clock, 7982 link_clock * nlanes * 8, 7983 &m_n->gmch_m, &m_n->gmch_n, 7984 constant_n); 7985 7986 compute_m_n(pixel_clock, link_clock, 7987 &m_n->link_m, &m_n->link_n, 7988 constant_n); 7989 } 7990 7991 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) 7992 { 7993 /* 7994 * There may be no VBT; and if the BIOS enabled SSC we can 7995 * just keep using it to avoid unnecessary flicker. Whereas if the 7996 * BIOS isn't using it, don't assume it will work even if the VBT 7997 * indicates as much. 
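 *
 * Illustrative case: the VBT requests SSC but the BIOS left
 * DREF_SSC1_ENABLE clear in PCH_DREF_CONTROL; the code below then
 * follows the BIOS, clears vbt.lvds_use_ssc and logs the override.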
7998 */ 7999 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 8000 bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) & 8001 DREF_SSC1_ENABLE; 8002 8003 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 8004 DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n", 8005 enableddisabled(bios_lvds_use_ssc), 8006 enableddisabled(dev_priv->vbt.lvds_use_ssc)); 8007 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 8008 } 8009 } 8010 } 8011 8012 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 8013 { 8014 if (i915_modparams.panel_use_ssc >= 0) 8015 return i915_modparams.panel_use_ssc != 0; 8016 return dev_priv->vbt.lvds_use_ssc 8017 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 8018 } 8019 8020 static u32 pnv_dpll_compute_fp(struct dpll *dpll) 8021 { 8022 return (1 << dpll->n) << 16 | dpll->m2; 8023 } 8024 8025 static u32 i9xx_dpll_compute_fp(struct dpll *dpll) 8026 { 8027 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 8028 } 8029 8030 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 8031 struct intel_crtc_state *crtc_state, 8032 struct dpll *reduced_clock) 8033 { 8034 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8035 u32 fp, fp2 = 0; 8036 8037 if (IS_PINEVIEW(dev_priv)) { 8038 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 8039 if (reduced_clock) 8040 fp2 = pnv_dpll_compute_fp(reduced_clock); 8041 } else { 8042 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 8043 if (reduced_clock) 8044 fp2 = i9xx_dpll_compute_fp(reduced_clock); 8045 } 8046 8047 crtc_state->dpll_hw_state.fp0 = fp; 8048 8049 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8050 reduced_clock) { 8051 crtc_state->dpll_hw_state.fp1 = fp2; 8052 } else { 8053 crtc_state->dpll_hw_state.fp1 = fp; 8054 } 8055 } 8056 8057 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe 8058 pipe) 8059 { 8060 u32 reg_val; 8061 8062 /* 8063 * PLLB opamp always calibrates to max value of 0x3f, force enable it 8064 * and set it to a reasonable value instead. 
8065 */ 8066 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 8067 reg_val &= 0xffffff00; 8068 reg_val |= 0x00000030; 8069 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 8070 8071 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 8072 reg_val &= 0x00ffffff; 8073 reg_val |= 0x8c000000; 8074 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 8075 8076 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 8077 reg_val &= 0xffffff00; 8078 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 8079 8080 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 8081 reg_val &= 0x00ffffff; 8082 reg_val |= 0xb0000000; 8083 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 8084 } 8085 8086 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 8087 const struct intel_link_m_n *m_n) 8088 { 8089 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8090 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8091 enum pipe pipe = crtc->pipe; 8092 8093 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 8094 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 8095 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 8096 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 8097 } 8098 8099 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 8100 enum transcoder transcoder) 8101 { 8102 if (IS_HASWELL(dev_priv)) 8103 return transcoder == TRANSCODER_EDP; 8104 8105 /* 8106 * Strictly speaking some registers are available before 8107 * gen7, but we only support DRRS on gen7+ 8108 */ 8109 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv); 8110 } 8111 8112 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, 8113 const struct intel_link_m_n *m_n, 8114 const struct intel_link_m_n *m2_n2) 8115 { 8116 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8117 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8118 enum pipe pipe = crtc->pipe; 8119 enum transcoder transcoder = crtc_state->cpu_transcoder; 8120 8121 if (INTEL_GEN(dev_priv) >= 5) { 8122 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 8123 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 8124 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 8125 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 8126 /* 8127 * M2_N2 registers are set only if DRRS is supported 8128 * (to make sure the registers are not unnecessarily accessed). 8129 */ 8130 if (m2_n2 && crtc_state->has_drrs && 8131 transcoder_has_m2_n2(dev_priv, transcoder)) { 8132 I915_WRITE(PIPE_DATA_M2(transcoder), 8133 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 8134 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 8135 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 8136 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 8137 } 8138 } else { 8139 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 8140 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 8141 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 8142 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 8143 } 8144 } 8145 8146 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) 8147 { 8148 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 8149 8150 if (m_n == M1_N1) { 8151 dp_m_n = &crtc_state->dp_m_n; 8152 dp_m2_n2 = &crtc_state->dp_m2_n2; 8153 } else if (m_n == M2_N2) { 8154 8155 /* 8156 * M2_N2 registers are not supported. Hence m2_n2 divider value 8157 * needs to be programmed into M1_N1. 
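 * (i.e. the M1_N1 register set is reprogrammed with the m2_n2 values;
 * this is how DRRS switches the link to the downclocked refresh rate
 * on platforms without a separate M2_N2 set)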
8158 */ 8159 dp_m_n = &crtc_state->dp_m2_n2; 8160 } else { 8161 DRM_ERROR("Unsupported divider value\n"); 8162 return; 8163 } 8164 8165 if (crtc_state->has_pch_encoder) 8166 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n); 8167 else 8168 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); 8169 } 8170 8171 static void vlv_compute_dpll(struct intel_crtc *crtc, 8172 struct intel_crtc_state *pipe_config) 8173 { 8174 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 8175 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 8176 if (crtc->pipe != PIPE_A) 8177 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 8178 8179 /* DPLL not used with DSI, but still need the rest set up */ 8180 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 8181 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 8182 DPLL_EXT_BUFFER_ENABLE_VLV; 8183 8184 pipe_config->dpll_hw_state.dpll_md = 8185 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8186 } 8187 8188 static void chv_compute_dpll(struct intel_crtc *crtc, 8189 struct intel_crtc_state *pipe_config) 8190 { 8191 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 8192 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 8193 if (crtc->pipe != PIPE_A) 8194 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 8195 8196 /* DPLL not used with DSI, but still need the rest set up */ 8197 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 8198 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 8199 8200 pipe_config->dpll_hw_state.dpll_md = 8201 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8202 } 8203 8204 static void vlv_prepare_pll(struct intel_crtc *crtc, 8205 const struct intel_crtc_state *pipe_config) 8206 { 8207 struct drm_device *dev = crtc->base.dev; 8208 struct drm_i915_private *dev_priv = to_i915(dev); 8209 enum pipe pipe = crtc->pipe; 8210 u32 mdiv; 8211 u32 bestn, bestm1, bestm2, bestp1, bestp2; 8212 u32 coreclk, reg_val; 8213 8214 /* Enable Refclk */ 8215 I915_WRITE(DPLL(pipe), 8216 pipe_config->dpll_hw_state.dpll & 8217 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 8218 8219 /* No need to actually set up the DPLL with DSI */ 8220 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8221 return; 8222 8223 vlv_dpio_get(dev_priv); 8224 8225 bestn = pipe_config->dpll.n; 8226 bestm1 = pipe_config->dpll.m1; 8227 bestm2 = pipe_config->dpll.m2; 8228 bestp1 = pipe_config->dpll.p1; 8229 bestp2 = pipe_config->dpll.p2; 8230 8231 /* See eDP HDMI DPIO driver vbios notes doc */ 8232 8233 /* PLL B needs special handling */ 8234 if (pipe == PIPE_B) 8235 vlv_pllb_recal_opamp(dev_priv, pipe); 8236 8237 /* Set up Tx target for periodic Rcomp update */ 8238 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 8239 8240 /* Disable target IRef on PLL */ 8241 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 8242 reg_val &= 0x00ffffff; 8243 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 8244 8245 /* Disable fast lock */ 8246 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 8247 8248 /* Set idtafcrecal before PLL is enabled */ 8249 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 8250 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 8251 mdiv |= ((bestn << DPIO_N_SHIFT)); 8252 mdiv |= (1 << DPIO_K_SHIFT); 8253 8254 /* 8255 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 8256 * but we don't support that). 8257 * Note: don't use the DAC post divider as it seems unstable. 
8258 */ 8259 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 8260 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 8261 8262 mdiv |= DPIO_ENABLE_CALIBRATION; 8263 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 8264 8265 /* Set HBR and RBR LPF coefficients */ 8266 if (pipe_config->port_clock == 162000 || 8267 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || 8268 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) 8269 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 8270 0x009f0003); 8271 else 8272 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 8273 0x00d0000f); 8274 8275 if (intel_crtc_has_dp_encoder(pipe_config)) { 8276 /* Use SSC source */ 8277 if (pipe == PIPE_A) 8278 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8279 0x0df40000); 8280 else 8281 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8282 0x0df70000); 8283 } else { /* HDMI or VGA */ 8284 /* Use bend source */ 8285 if (pipe == PIPE_A) 8286 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8287 0x0df70000); 8288 else 8289 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8290 0x0df40000); 8291 } 8292 8293 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 8294 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 8295 if (intel_crtc_has_dp_encoder(pipe_config)) 8296 coreclk |= 0x01000000; 8297 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 8298 8299 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 8300 8301 vlv_dpio_put(dev_priv); 8302 } 8303 8304 static void chv_prepare_pll(struct intel_crtc *crtc, 8305 const struct intel_crtc_state *pipe_config) 8306 { 8307 struct drm_device *dev = crtc->base.dev; 8308 struct drm_i915_private *dev_priv = to_i915(dev); 8309 enum pipe pipe = crtc->pipe; 8310 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8311 u32 loopfilter, tribuf_calcntr; 8312 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 8313 u32 dpio_val; 8314 int vco; 8315 8316 /* Enable Refclk and SSC */ 8317 I915_WRITE(DPLL(pipe), 8318 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 8319 8320 /* No need to actually set up the DPLL with DSI */ 8321 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8322 return; 8323 8324 bestn = pipe_config->dpll.n; 8325 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 8326 bestm1 = pipe_config->dpll.m1; 8327 bestm2 = pipe_config->dpll.m2 >> 22; 8328 bestp1 = pipe_config->dpll.p1; 8329 bestp2 = pipe_config->dpll.p2; 8330 vco = pipe_config->dpll.vco; 8331 dpio_val = 0; 8332 loopfilter = 0; 8333 8334 vlv_dpio_get(dev_priv); 8335 8336 /* p1 and p2 divider */ 8337 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 8338 5 << DPIO_CHV_S1_DIV_SHIFT | 8339 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 8340 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 8341 1 << DPIO_CHV_K_DIV_SHIFT); 8342 8343 /* Feedback post-divider - m2 */ 8344 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 8345 8346 /* Feedback refclk divider - n and m1 */ 8347 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 8348 DPIO_CHV_M1_DIV_BY_2 | 8349 1 << DPIO_CHV_N_DIV_SHIFT); 8350 8351 /* M2 fraction division */ 8352 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 8353 8354 /* M2 fraction division enable */ 8355 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8356 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 8357 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 8358 if (bestm2_frac) 8359 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 8360 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 8361 8362 /* Program 
digital lock detect threshold */ 8363 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 8364 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 8365 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 8366 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 8367 if (!bestm2_frac) 8368 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 8369 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 8370 8371 /* Loop filter */ 8372 if (vco == 5400000) { 8373 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 8374 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 8375 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 8376 tribuf_calcntr = 0x9; 8377 } else if (vco <= 6200000) { 8378 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 8379 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 8380 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8381 tribuf_calcntr = 0x9; 8382 } else if (vco <= 6480000) { 8383 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 8384 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 8385 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8386 tribuf_calcntr = 0x8; 8387 } else { 8388 /* Not supported. Apply the same limits as in the max case */ 8389 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 8390 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 8391 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8392 tribuf_calcntr = 0; 8393 } 8394 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 8395 8396 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 8397 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 8398 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 8399 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 8400 8401 /* AFC Recal */ 8402 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 8403 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 8404 DPIO_AFC_RECAL); 8405 8406 vlv_dpio_put(dev_priv); 8407 } 8408 8409 /** 8410 * vlv_force_pll_on - forcibly enable just the PLL 8411 * @dev_priv: i915 private structure 8412 * @pipe: pipe PLL to enable 8413 * @dpll: PLL configuration 8414 * 8415 * Enable the PLL for @pipe using the supplied @dpll config. To be used 8416 * in cases where we need the PLL enabled even when @pipe is not going to 8417 * be enabled. 8418 */ 8419 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, 8420 const struct dpll *dpll) 8421 { 8422 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 8423 struct intel_crtc_state *pipe_config; 8424 8425 pipe_config = intel_crtc_state_alloc(crtc); 8426 if (!pipe_config) 8427 return -ENOMEM; 8428 8429 pipe_config->cpu_transcoder = (enum transcoder)pipe; 8430 pipe_config->pixel_multiplier = 1; 8431 pipe_config->dpll = *dpll; 8432 8433 if (IS_CHERRYVIEW(dev_priv)) { 8434 chv_compute_dpll(crtc, pipe_config); 8435 chv_prepare_pll(crtc, pipe_config); 8436 chv_enable_pll(crtc, pipe_config); 8437 } else { 8438 vlv_compute_dpll(crtc, pipe_config); 8439 vlv_prepare_pll(crtc, pipe_config); 8440 vlv_enable_pll(crtc, pipe_config); 8441 } 8442 8443 kfree(pipe_config); 8444 8445 return 0; 8446 } 8447 8448 /** 8449 * vlv_force_pll_off - forcibly disable just the PLL 8450 * @dev_priv: i915 private structure 8451 * @pipe: pipe PLL to disable 8452 * 8453 * Disable the PLL for @pipe. To be used in cases where we need 8454 * the PLL enabled even when @pipe is not going to be enabled. 
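 * This is the counterpart of vlv_force_pll_on().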
*/
8456 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8457 {
8458 if (IS_CHERRYVIEW(dev_priv))
8459 chv_disable_pll(dev_priv, pipe);
8460 else
8461 vlv_disable_pll(dev_priv, pipe);
8462 }
8463
8464 static void i9xx_compute_dpll(struct intel_crtc *crtc,
8465 struct intel_crtc_state *crtc_state,
8466 struct dpll *reduced_clock)
8467 {
8468 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8469 u32 dpll;
8470 struct dpll *clock = &crtc_state->dpll;
8471
8472 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8473
8474 dpll = DPLL_VGA_MODE_DIS;
8475
8476 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8477 dpll |= DPLLB_MODE_LVDS;
8478 else
8479 dpll |= DPLLB_MODE_DAC_SERIAL;
8480
8481 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8482 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8483 dpll |= (crtc_state->pixel_multiplier - 1)
8484 << SDVO_MULTIPLIER_SHIFT_HIRES;
8485 }
8486
8487 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8488 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8489 dpll |= DPLL_SDVO_HIGH_SPEED;
8490
8491 if (intel_crtc_has_dp_encoder(crtc_state))
8492 dpll |= DPLL_SDVO_HIGH_SPEED;
8493
8494 /* compute bitmask from p1 value */
8495 if (IS_PINEVIEW(dev_priv))
8496 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
8497 else {
8498 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8499 if (IS_G4X(dev_priv) && reduced_clock)
8500 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8501 }
8502 switch (clock->p2) {
8503 case 5:
8504 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8505 break;
8506 case 7:
8507 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8508 break;
8509 case 10:
8510 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8511 break;
8512 case 14:
8513 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8514 break;
8515 }
8516 if (INTEL_GEN(dev_priv) >= 4)
8517 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
8518
8519 if (crtc_state->sdvo_tv_clock)
8520 dpll |= PLL_REF_INPUT_TVCLKINBC;
8521 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8522 intel_panel_use_ssc(dev_priv))
8523 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8524 else
8525 dpll |= PLL_REF_INPUT_DREFCLK;
8526
8527 dpll |= DPLL_VCO_ENABLE;
8528 crtc_state->dpll_hw_state.dpll = dpll;
8529
8530 if (INTEL_GEN(dev_priv) >= 4) {
8531 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
8532 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8533 crtc_state->dpll_hw_state.dpll_md = dpll_md;
8534 }
8535 }
8536
8537 static void i8xx_compute_dpll(struct intel_crtc *crtc,
8538 struct intel_crtc_state *crtc_state,
8539 struct dpll *reduced_clock)
8540 {
8541 struct drm_device *dev = crtc->base.dev;
8542 struct drm_i915_private *dev_priv = to_i915(dev);
8543 u32 dpll;
8544 struct dpll *clock = &crtc_state->dpll;
8545
8546 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8547
8548 dpll = DPLL_VGA_MODE_DIS;
8549
8550 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8551 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8552 } else {
8553 if (clock->p1 == 2)
8554 dpll |= PLL_P1_DIVIDE_BY_TWO;
8555 else
8556 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8557 if (clock->p2 == 4)
8558 dpll |= PLL_P2_DIVIDE_BY_4;
8559 }
8560
8561 /*
8562 * Bspec:
8563 * "[Almador Errata]: For the correct operation of the muxed DVO pins
8564 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
8565 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
8566 * Enable) must be set to “1” in both the DPLL A Control
Register 8567 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)." 8568 * 8569 * For simplicity We simply keep both bits always enabled in 8570 * both DPLLS. The spec says we should disable the DVO 2X clock 8571 * when not needed, but this seems to work fine in practice. 8572 */ 8573 if (IS_I830(dev_priv) || 8574 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) 8575 dpll |= DPLL_DVO_2X_MODE; 8576 8577 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8578 intel_panel_use_ssc(dev_priv)) 8579 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8580 else 8581 dpll |= PLL_REF_INPUT_DREFCLK; 8582 8583 dpll |= DPLL_VCO_ENABLE; 8584 crtc_state->dpll_hw_state.dpll = dpll; 8585 } 8586 8587 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) 8588 { 8589 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8590 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8591 enum pipe pipe = crtc->pipe; 8592 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8593 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 8594 u32 crtc_vtotal, crtc_vblank_end; 8595 int vsyncshift = 0; 8596 8597 /* We need to be careful not to changed the adjusted mode, for otherwise 8598 * the hw state checker will get angry at the mismatch. */ 8599 crtc_vtotal = adjusted_mode->crtc_vtotal; 8600 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 8601 8602 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 8603 /* the chip adds 2 halflines automatically */ 8604 crtc_vtotal -= 1; 8605 crtc_vblank_end -= 1; 8606 8607 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8608 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 8609 else 8610 vsyncshift = adjusted_mode->crtc_hsync_start - 8611 adjusted_mode->crtc_htotal / 2; 8612 if (vsyncshift < 0) 8613 vsyncshift += adjusted_mode->crtc_htotal; 8614 } 8615 8616 if (INTEL_GEN(dev_priv) > 3) 8617 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 8618 8619 I915_WRITE(HTOTAL(cpu_transcoder), 8620 (adjusted_mode->crtc_hdisplay - 1) | 8621 ((adjusted_mode->crtc_htotal - 1) << 16)); 8622 I915_WRITE(HBLANK(cpu_transcoder), 8623 (adjusted_mode->crtc_hblank_start - 1) | 8624 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 8625 I915_WRITE(HSYNC(cpu_transcoder), 8626 (adjusted_mode->crtc_hsync_start - 1) | 8627 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 8628 8629 I915_WRITE(VTOTAL(cpu_transcoder), 8630 (adjusted_mode->crtc_vdisplay - 1) | 8631 ((crtc_vtotal - 1) << 16)); 8632 I915_WRITE(VBLANK(cpu_transcoder), 8633 (adjusted_mode->crtc_vblank_start - 1) | 8634 ((crtc_vblank_end - 1) << 16)); 8635 I915_WRITE(VSYNC(cpu_transcoder), 8636 (adjusted_mode->crtc_vsync_start - 1) | 8637 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 8638 8639 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 8640 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 8641 * documented on the DDI_FUNC_CTL register description, EDP Input Select 8642 * bits. 
*/ 8643 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 8644 (pipe == PIPE_B || pipe == PIPE_C)) 8645 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 8646 8647 } 8648 8649 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 8650 { 8651 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8652 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8653 enum pipe pipe = crtc->pipe; 8654 8655 /* pipesrc controls the size that is scaled from, which should 8656 * always be the user's requested size. 8657 */ 8658 I915_WRITE(PIPESRC(pipe), 8659 ((crtc_state->pipe_src_w - 1) << 16) | 8660 (crtc_state->pipe_src_h - 1)); 8661 } 8662 8663 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 8664 { 8665 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 8666 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8667 8668 if (IS_GEN(dev_priv, 2)) 8669 return false; 8670 8671 if (INTEL_GEN(dev_priv) >= 9 || 8672 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 8673 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; 8674 else 8675 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; 8676 } 8677 8678 static void intel_get_pipe_timings(struct intel_crtc *crtc, 8679 struct intel_crtc_state *pipe_config) 8680 { 8681 struct drm_device *dev = crtc->base.dev; 8682 struct drm_i915_private *dev_priv = to_i915(dev); 8683 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 8684 u32 tmp; 8685 8686 tmp = I915_READ(HTOTAL(cpu_transcoder)); 8687 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 8688 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 8689 8690 if (!transcoder_is_dsi(cpu_transcoder)) { 8691 tmp = I915_READ(HBLANK(cpu_transcoder)); 8692 pipe_config->hw.adjusted_mode.crtc_hblank_start = 8693 (tmp & 0xffff) + 1; 8694 pipe_config->hw.adjusted_mode.crtc_hblank_end = 8695 ((tmp >> 16) & 0xffff) + 1; 8696 } 8697 tmp = I915_READ(HSYNC(cpu_transcoder)); 8698 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 8699 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 8700 8701 tmp = I915_READ(VTOTAL(cpu_transcoder)); 8702 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 8703 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 8704 8705 if (!transcoder_is_dsi(cpu_transcoder)) { 8706 tmp = I915_READ(VBLANK(cpu_transcoder)); 8707 pipe_config->hw.adjusted_mode.crtc_vblank_start = 8708 (tmp & 0xffff) + 1; 8709 pipe_config->hw.adjusted_mode.crtc_vblank_end = 8710 ((tmp >> 16) & 0xffff) + 1; 8711 } 8712 tmp = I915_READ(VSYNC(cpu_transcoder)); 8713 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 8714 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 8715 8716 if (intel_pipe_is_interlaced(pipe_config)) { 8717 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 8718 pipe_config->hw.adjusted_mode.crtc_vtotal += 1; 8719 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1; 8720 } 8721 } 8722 8723 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 8724 struct intel_crtc_state *pipe_config) 8725 { 8726 struct drm_device *dev = crtc->base.dev; 8727 struct drm_i915_private *dev_priv = to_i915(dev); 8728 u32 tmp; 8729 8730 tmp = I915_READ(PIPESRC(crtc->pipe)); 8731 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 8732 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 8733 8734 
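/*
 * Worked example (illustrative numbers): a 1920x1080 pipe source reads
 * back as PIPESRC == 0x077f0437, i.e. ((1920 - 1) << 16) | (1080 - 1).
 */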
pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h; 8735 pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w; 8736 } 8737 8738 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 8739 struct intel_crtc_state *pipe_config) 8740 { 8741 mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay; 8742 mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal; 8743 mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start; 8744 mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end; 8745 8746 mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay; 8747 mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal; 8748 mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start; 8749 mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end; 8750 8751 mode->flags = pipe_config->hw.adjusted_mode.flags; 8752 mode->type = DRM_MODE_TYPE_DRIVER; 8753 8754 mode->clock = pipe_config->hw.adjusted_mode.crtc_clock; 8755 8756 mode->hsync = drm_mode_hsync(mode); 8757 mode->vrefresh = drm_mode_vrefresh(mode); 8758 drm_mode_set_name(mode); 8759 } 8760 8761 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 8762 { 8763 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8764 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8765 u32 pipeconf; 8766 8767 pipeconf = 0; 8768 8769 /* we keep both pipes enabled on 830 */ 8770 if (IS_I830(dev_priv)) 8771 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; 8772 8773 if (crtc_state->double_wide) 8774 pipeconf |= PIPECONF_DOUBLE_WIDE; 8775 8776 /* only g4x and later have fancy bpc/dither controls */ 8777 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8778 IS_CHERRYVIEW(dev_priv)) { 8779 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 8780 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 8781 pipeconf |= PIPECONF_DITHER_EN | 8782 PIPECONF_DITHER_TYPE_SP; 8783 8784 switch (crtc_state->pipe_bpp) { 8785 case 18: 8786 pipeconf |= PIPECONF_6BPC; 8787 break; 8788 case 24: 8789 pipeconf |= PIPECONF_8BPC; 8790 break; 8791 case 30: 8792 pipeconf |= PIPECONF_10BPC; 8793 break; 8794 default: 8795 /* Case prevented by intel_choose_pipe_bpp_dither. 
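 * (only 18, 24 or 30 bpp should be picked for these platforms, so no
 * other value is expected to reach this switch)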
*/ 8796 BUG(); 8797 } 8798 } 8799 8800 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 8801 if (INTEL_GEN(dev_priv) < 4 || 8802 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8803 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 8804 else 8805 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 8806 } else { 8807 pipeconf |= PIPECONF_PROGRESSIVE; 8808 } 8809 8810 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 8811 crtc_state->limited_color_range) 8812 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 8813 8814 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 8815 8816 pipeconf |= PIPECONF_FRAME_START_DELAY(0); 8817 8818 I915_WRITE(PIPECONF(crtc->pipe), pipeconf); 8819 POSTING_READ(PIPECONF(crtc->pipe)); 8820 } 8821 8822 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 8823 struct intel_crtc_state *crtc_state) 8824 { 8825 struct drm_device *dev = crtc->base.dev; 8826 struct drm_i915_private *dev_priv = to_i915(dev); 8827 const struct intel_limit *limit; 8828 int refclk = 48000; 8829 8830 memset(&crtc_state->dpll_hw_state, 0, 8831 sizeof(crtc_state->dpll_hw_state)); 8832 8833 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8834 if (intel_panel_use_ssc(dev_priv)) { 8835 refclk = dev_priv->vbt.lvds_ssc_freq; 8836 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8837 } 8838 8839 limit = &intel_limits_i8xx_lvds; 8840 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 8841 limit = &intel_limits_i8xx_dvo; 8842 } else { 8843 limit = &intel_limits_i8xx_dac; 8844 } 8845 8846 if (!crtc_state->clock_set && 8847 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8848 refclk, NULL, &crtc_state->dpll)) { 8849 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8850 return -EINVAL; 8851 } 8852 8853 i8xx_compute_dpll(crtc, crtc_state, NULL); 8854 8855 return 0; 8856 } 8857 8858 static int g4x_crtc_compute_clock(struct intel_crtc *crtc, 8859 struct intel_crtc_state *crtc_state) 8860 { 8861 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8862 const struct intel_limit *limit; 8863 int refclk = 96000; 8864 8865 memset(&crtc_state->dpll_hw_state, 0, 8866 sizeof(crtc_state->dpll_hw_state)); 8867 8868 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8869 if (intel_panel_use_ssc(dev_priv)) { 8870 refclk = dev_priv->vbt.lvds_ssc_freq; 8871 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8872 } 8873 8874 if (intel_is_dual_link_lvds(dev_priv)) 8875 limit = &intel_limits_g4x_dual_channel_lvds; 8876 else 8877 limit = &intel_limits_g4x_single_channel_lvds; 8878 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 8879 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 8880 limit = &intel_limits_g4x_hdmi; 8881 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 8882 limit = &intel_limits_g4x_sdvo; 8883 } else { 8884 /* The option is for other outputs */ 8885 limit = &intel_limits_i9xx_sdvo; 8886 } 8887 8888 if (!crtc_state->clock_set && 8889 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8890 refclk, NULL, &crtc_state->dpll)) { 8891 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8892 return -EINVAL; 8893 } 8894 8895 i9xx_compute_dpll(crtc, crtc_state, NULL); 8896 8897 return 0; 8898 } 8899 8900 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 8901 struct intel_crtc_state *crtc_state) 8902 { 8903 struct drm_device *dev = crtc->base.dev; 8904 struct drm_i915_private *dev_priv = to_i915(dev); 8905 const struct intel_limit *limit; 
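/* 96 MHz is the default non-SSC reference here; LVDS+SSC overrides it below */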
8906 int refclk = 96000; 8907 8908 memset(&crtc_state->dpll_hw_state, 0, 8909 sizeof(crtc_state->dpll_hw_state)); 8910 8911 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8912 if (intel_panel_use_ssc(dev_priv)) { 8913 refclk = dev_priv->vbt.lvds_ssc_freq; 8914 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8915 } 8916 8917 limit = &pnv_limits_lvds; 8918 } else { 8919 limit = &pnv_limits_sdvo; 8920 } 8921 8922 if (!crtc_state->clock_set && 8923 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8924 refclk, NULL, &crtc_state->dpll)) { 8925 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8926 return -EINVAL; 8927 } 8928 8929 i9xx_compute_dpll(crtc, crtc_state, NULL); 8930 8931 return 0; 8932 } 8933 8934 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 8935 struct intel_crtc_state *crtc_state) 8936 { 8937 struct drm_device *dev = crtc->base.dev; 8938 struct drm_i915_private *dev_priv = to_i915(dev); 8939 const struct intel_limit *limit; 8940 int refclk = 96000; 8941 8942 memset(&crtc_state->dpll_hw_state, 0, 8943 sizeof(crtc_state->dpll_hw_state)); 8944 8945 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8946 if (intel_panel_use_ssc(dev_priv)) { 8947 refclk = dev_priv->vbt.lvds_ssc_freq; 8948 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8949 } 8950 8951 limit = &intel_limits_i9xx_lvds; 8952 } else { 8953 limit = &intel_limits_i9xx_sdvo; 8954 } 8955 8956 if (!crtc_state->clock_set && 8957 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8958 refclk, NULL, &crtc_state->dpll)) { 8959 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8960 return -EINVAL; 8961 } 8962 8963 i9xx_compute_dpll(crtc, crtc_state, NULL); 8964 8965 return 0; 8966 } 8967 8968 static int chv_crtc_compute_clock(struct intel_crtc *crtc, 8969 struct intel_crtc_state *crtc_state) 8970 { 8971 int refclk = 100000; 8972 const struct intel_limit *limit = &intel_limits_chv; 8973 8974 memset(&crtc_state->dpll_hw_state, 0, 8975 sizeof(crtc_state->dpll_hw_state)); 8976 8977 if (!crtc_state->clock_set && 8978 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8979 refclk, NULL, &crtc_state->dpll)) { 8980 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8981 return -EINVAL; 8982 } 8983 8984 chv_compute_dpll(crtc, crtc_state); 8985 8986 return 0; 8987 } 8988 8989 static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 8990 struct intel_crtc_state *crtc_state) 8991 { 8992 int refclk = 100000; 8993 const struct intel_limit *limit = &intel_limits_vlv; 8994 8995 memset(&crtc_state->dpll_hw_state, 0, 8996 sizeof(crtc_state->dpll_hw_state)); 8997 8998 if (!crtc_state->clock_set && 8999 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9000 refclk, NULL, &crtc_state->dpll)) { 9001 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 9002 return -EINVAL; 9003 } 9004 9005 vlv_compute_dpll(crtc, crtc_state); 9006 9007 return 0; 9008 } 9009 9010 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 9011 { 9012 if (IS_I830(dev_priv)) 9013 return false; 9014 9015 return INTEL_GEN(dev_priv) >= 4 || 9016 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 9017 } 9018 9019 static void i9xx_get_pfit_config(struct intel_crtc *crtc, 9020 struct intel_crtc_state *pipe_config) 9021 { 9022 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9023 u32 tmp; 9024 9025 if (!i9xx_has_pfit(dev_priv)) 9026 return; 9027 9028 tmp = I915_READ(PFIT_CONTROL); 9029 if (!(tmp & PFIT_ENABLE)) 9030 return; 9031 9032 /* Check whether 
the pfit is attached to our pipe. */ 9033 if (INTEL_GEN(dev_priv) < 4) { 9034 if (crtc->pipe != PIPE_B) 9035 return; 9036 } else { 9037 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 9038 return; 9039 } 9040 9041 pipe_config->gmch_pfit.control = tmp; 9042 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 9043 } 9044 9045 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 9046 struct intel_crtc_state *pipe_config) 9047 { 9048 struct drm_device *dev = crtc->base.dev; 9049 struct drm_i915_private *dev_priv = to_i915(dev); 9050 enum pipe pipe = crtc->pipe; 9051 struct dpll clock; 9052 u32 mdiv; 9053 int refclk = 100000; 9054 9055 /* In case of DSI, DPLL will not be used */ 9056 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 9057 return; 9058 9059 vlv_dpio_get(dev_priv); 9060 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 9061 vlv_dpio_put(dev_priv); 9062 9063 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 9064 clock.m2 = mdiv & DPIO_M2DIV_MASK; 9065 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 9066 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 9067 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 9068 9069 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 9070 } 9071 9072 static void 9073 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 9074 struct intel_initial_plane_config *plane_config) 9075 { 9076 struct drm_device *dev = crtc->base.dev; 9077 struct drm_i915_private *dev_priv = to_i915(dev); 9078 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 9079 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 9080 enum pipe pipe; 9081 u32 val, base, offset; 9082 int fourcc, pixel_format; 9083 unsigned int aligned_height; 9084 struct drm_framebuffer *fb; 9085 struct intel_framebuffer *intel_fb; 9086 9087 if (!plane->get_hw_state(plane, &pipe)) 9088 return; 9089 9090 WARN_ON(pipe != crtc->pipe); 9091 9092 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9093 if (!intel_fb) { 9094 DRM_DEBUG_KMS("failed to alloc fb\n"); 9095 return; 9096 } 9097 9098 fb = &intel_fb->base; 9099 9100 fb->dev = dev; 9101 9102 val = I915_READ(DSPCNTR(i9xx_plane)); 9103 9104 if (INTEL_GEN(dev_priv) >= 4) { 9105 if (val & DISPPLANE_TILED) { 9106 plane_config->tiling = I915_TILING_X; 9107 fb->modifier = I915_FORMAT_MOD_X_TILED; 9108 } 9109 9110 if (val & DISPPLANE_ROTATE_180) 9111 plane_config->rotation = DRM_MODE_ROTATE_180; 9112 } 9113 9114 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && 9115 val & DISPPLANE_MIRROR) 9116 plane_config->rotation |= DRM_MODE_REFLECT_X; 9117 9118 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 9119 fourcc = i9xx_format_to_fourcc(pixel_format); 9120 fb->format = drm_format_info(fourcc); 9121 9122 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 9123 offset = I915_READ(DSPOFFSET(i9xx_plane)); 9124 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 9125 } else if (INTEL_GEN(dev_priv) >= 4) { 9126 if (plane_config->tiling) 9127 offset = I915_READ(DSPTILEOFF(i9xx_plane)); 9128 else 9129 offset = I915_READ(DSPLINOFF(i9xx_plane)); 9130 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; 9131 } else { 9132 base = I915_READ(DSPADDR(i9xx_plane)); 9133 } 9134 plane_config->base = base; 9135 9136 val = I915_READ(PIPESRC(pipe)); 9137 fb->width = ((val >> 16) & 0xfff) + 1; 9138 fb->height = ((val >> 0) & 0xfff) + 1; 9139 9140 val = I915_READ(DSPSTRIDE(i9xx_plane)); 9141 fb->pitches[0] = val & 0xffffffc0; 9142 9143 aligned_height = intel_fb_align_height(fb, 0, fb->height); 9144 9145 plane_config->size = fb->pitches[0] * 
aligned_height; 9146 9147 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 9148 crtc->base.name, plane->base.name, fb->width, fb->height, 9149 fb->format->cpp[0] * 8, base, fb->pitches[0], 9150 plane_config->size); 9151 9152 plane_config->fb = intel_fb; 9153 } 9154 9155 static void chv_crtc_clock_get(struct intel_crtc *crtc, 9156 struct intel_crtc_state *pipe_config) 9157 { 9158 struct drm_device *dev = crtc->base.dev; 9159 struct drm_i915_private *dev_priv = to_i915(dev); 9160 enum pipe pipe = crtc->pipe; 9161 enum dpio_channel port = vlv_pipe_to_channel(pipe); 9162 struct dpll clock; 9163 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 9164 int refclk = 100000; 9165 9166 /* In case of DSI, DPLL will not be used */ 9167 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 9168 return; 9169 9170 vlv_dpio_get(dev_priv); 9171 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 9172 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 9173 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 9174 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 9175 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 9176 vlv_dpio_put(dev_priv); 9177 9178 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 9179 clock.m2 = (pll_dw0 & 0xff) << 22; 9180 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 9181 clock.m2 |= pll_dw2 & 0x3fffff; 9182 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 9183 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 9184 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 9185 9186 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 9187 } 9188 9189 static enum intel_output_format 9190 bdw_get_pipemisc_output_format(struct intel_crtc *crtc) 9191 { 9192 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9193 u32 tmp; 9194 9195 tmp = I915_READ(PIPEMISC(crtc->pipe)); 9196 9197 if (tmp & PIPEMISC_YUV420_ENABLE) { 9198 /* We support 4:2:0 in full blend mode only */ 9199 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); 9200 9201 return INTEL_OUTPUT_FORMAT_YCBCR420; 9202 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { 9203 return INTEL_OUTPUT_FORMAT_YCBCR444; 9204 } else { 9205 return INTEL_OUTPUT_FORMAT_RGB; 9206 } 9207 } 9208 9209 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) 9210 { 9211 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9212 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 9213 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9214 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 9215 u32 tmp; 9216 9217 tmp = I915_READ(DSPCNTR(i9xx_plane)); 9218 9219 if (tmp & DISPPLANE_GAMMA_ENABLE) 9220 crtc_state->gamma_enable = true; 9221 9222 if (!HAS_GMCH(dev_priv) && 9223 tmp & DISPPLANE_PIPE_CSC_ENABLE) 9224 crtc_state->csc_enable = true; 9225 } 9226 9227 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 9228 struct intel_crtc_state *pipe_config) 9229 { 9230 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9231 enum intel_display_power_domain power_domain; 9232 intel_wakeref_t wakeref; 9233 u32 tmp; 9234 bool ret; 9235 9236 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9237 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 9238 if (!wakeref) 9239 return false; 9240 9241 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 9242 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9243 pipe_config->shared_dpll = NULL; 9244 
pipe_config->master_transcoder = INVALID_TRANSCODER; 9245 9246 ret = false; 9247 9248 tmp = I915_READ(PIPECONF(crtc->pipe)); 9249 if (!(tmp & PIPECONF_ENABLE)) 9250 goto out; 9251 9252 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 9253 IS_CHERRYVIEW(dev_priv)) { 9254 switch (tmp & PIPECONF_BPC_MASK) { 9255 case PIPECONF_6BPC: 9256 pipe_config->pipe_bpp = 18; 9257 break; 9258 case PIPECONF_8BPC: 9259 pipe_config->pipe_bpp = 24; 9260 break; 9261 case PIPECONF_10BPC: 9262 pipe_config->pipe_bpp = 30; 9263 break; 9264 default: 9265 break; 9266 } 9267 } 9268 9269 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 9270 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 9271 pipe_config->limited_color_range = true; 9272 9273 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> 9274 PIPECONF_GAMMA_MODE_SHIFT; 9275 9276 if (IS_CHERRYVIEW(dev_priv)) 9277 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe)); 9278 9279 i9xx_get_pipe_color_config(pipe_config); 9280 intel_color_get_config(pipe_config); 9281 9282 if (INTEL_GEN(dev_priv) < 4) 9283 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 9284 9285 intel_get_pipe_timings(crtc, pipe_config); 9286 intel_get_pipe_src_size(crtc, pipe_config); 9287 9288 i9xx_get_pfit_config(crtc, pipe_config); 9289 9290 if (INTEL_GEN(dev_priv) >= 4) { 9291 /* No way to read it out on pipes B and C */ 9292 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 9293 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 9294 else 9295 tmp = I915_READ(DPLL_MD(crtc->pipe)); 9296 pipe_config->pixel_multiplier = 9297 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 9298 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 9299 pipe_config->dpll_hw_state.dpll_md = tmp; 9300 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 9301 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 9302 tmp = I915_READ(DPLL(crtc->pipe)); 9303 pipe_config->pixel_multiplier = 9304 ((tmp & SDVO_MULTIPLIER_MASK) 9305 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 9306 } else { 9307 /* Note that on i915G/GM the pixel multiplier is in the sdvo 9308 * port and will be fixed up in the encoder->get_config 9309 * function. */ 9310 pipe_config->pixel_multiplier = 1; 9311 } 9312 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 9313 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 9314 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 9315 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 9316 } else { 9317 /* Mask out read-only status bits. */ 9318 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 9319 DPLL_PORTC_READY_MASK | 9320 DPLL_PORTB_READY_MASK); 9321 } 9322 9323 if (IS_CHERRYVIEW(dev_priv)) 9324 chv_crtc_clock_get(crtc, pipe_config); 9325 else if (IS_VALLEYVIEW(dev_priv)) 9326 vlv_crtc_clock_get(crtc, pipe_config); 9327 else 9328 i9xx_crtc_clock_get(crtc, pipe_config); 9329 9330 /* 9331 * Normally the dotclock is filled in by the encoder .get_config() 9332 * but in case the pipe is enabled w/o any ports we need a sane 9333 * default. 
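* E.g. an SDVO mode with a 25175 kHz dot clock and a 4x pixel multiplier runs the port at 100700 kHz; dividing the port clock back down by the multiplier recovers the dot clock (illustrative numbers only).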
9334 */ 9335 pipe_config->hw.adjusted_mode.crtc_clock = 9336 pipe_config->port_clock / pipe_config->pixel_multiplier; 9337 9338 ret = true; 9339 9340 out: 9341 intel_display_power_put(dev_priv, power_domain, wakeref); 9342 9343 return ret; 9344 } 9345 9346 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) 9347 { 9348 struct intel_encoder *encoder; 9349 int i; 9350 u32 val, final; 9351 bool has_lvds = false; 9352 bool has_cpu_edp = false; 9353 bool has_panel = false; 9354 bool has_ck505 = false; 9355 bool can_ssc = false; 9356 bool using_ssc_source = false; 9357 9358 /* We need to take the global config into account */ 9359 for_each_intel_encoder(&dev_priv->drm, encoder) { 9360 switch (encoder->type) { 9361 case INTEL_OUTPUT_LVDS: 9362 has_panel = true; 9363 has_lvds = true; 9364 break; 9365 case INTEL_OUTPUT_EDP: 9366 has_panel = true; 9367 if (encoder->port == PORT_A) 9368 has_cpu_edp = true; 9369 break; 9370 default: 9371 break; 9372 } 9373 } 9374 9375 if (HAS_PCH_IBX(dev_priv)) { 9376 has_ck505 = dev_priv->vbt.display_clock_mode; 9377 can_ssc = has_ck505; 9378 } else { 9379 has_ck505 = false; 9380 can_ssc = true; 9381 } 9382 9383 /* Check if any DPLLs are using the SSC source */ 9384 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 9385 u32 temp = I915_READ(PCH_DPLL(i)); 9386 9387 if (!(temp & DPLL_VCO_ENABLE)) 9388 continue; 9389 9390 if ((temp & PLL_REF_INPUT_MASK) == 9391 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 9392 using_ssc_source = true; 9393 break; 9394 } 9395 } 9396 9397 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 9398 has_panel, has_lvds, has_ck505, using_ssc_source); 9399 9400 /* Ironlake: try to set up the display ref clock before DPLL 9401 * enabling. This is only under the driver's control after 9402 * PCH B stepping; earlier chipset steppings should 9403 * ignore this setting. 9404 */ 9405 val = I915_READ(PCH_DREF_CONTROL); 9406 9407 /* As we must carefully and slowly disable/enable each source in turn, 9408 * compute the final state we want first and check if we need to 9409 * make any changes at all.
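* The code below therefore first computes 'final' from the current 'val', bails out early if nothing needs to change, and otherwise steps 'val' towards 'final' one source at a time; the BUG_ON(val != final) at the end asserts that every step was applied.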
9410 */ 9411 final = val; 9412 final &= ~DREF_NONSPREAD_SOURCE_MASK; 9413 if (has_ck505) 9414 final |= DREF_NONSPREAD_CK505_ENABLE; 9415 else 9416 final |= DREF_NONSPREAD_SOURCE_ENABLE; 9417 9418 final &= ~DREF_SSC_SOURCE_MASK; 9419 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9420 final &= ~DREF_SSC1_ENABLE; 9421 9422 if (has_panel) { 9423 final |= DREF_SSC_SOURCE_ENABLE; 9424 9425 if (intel_panel_use_ssc(dev_priv) && can_ssc) 9426 final |= DREF_SSC1_ENABLE; 9427 9428 if (has_cpu_edp) { 9429 if (intel_panel_use_ssc(dev_priv) && can_ssc) 9430 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9431 else 9432 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9433 } else 9434 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9435 } else if (using_ssc_source) { 9436 final |= DREF_SSC_SOURCE_ENABLE; 9437 final |= DREF_SSC1_ENABLE; 9438 } 9439 9440 if (final == val) 9441 return; 9442 9443 /* Always enable nonspread source */ 9444 val &= ~DREF_NONSPREAD_SOURCE_MASK; 9445 9446 if (has_ck505) 9447 val |= DREF_NONSPREAD_CK505_ENABLE; 9448 else 9449 val |= DREF_NONSPREAD_SOURCE_ENABLE; 9450 9451 if (has_panel) { 9452 val &= ~DREF_SSC_SOURCE_MASK; 9453 val |= DREF_SSC_SOURCE_ENABLE; 9454 9455 /* SSC must be turned on before enabling the CPU output */ 9456 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9457 DRM_DEBUG_KMS("Using SSC on panel\n"); 9458 val |= DREF_SSC1_ENABLE; 9459 } else 9460 val &= ~DREF_SSC1_ENABLE; 9461 9462 /* Get SSC going before enabling the outputs */ 9463 I915_WRITE(PCH_DREF_CONTROL, val); 9464 POSTING_READ(PCH_DREF_CONTROL); 9465 udelay(200); 9466 9467 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9468 9469 /* Enable CPU source on CPU attached eDP */ 9470 if (has_cpu_edp) { 9471 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9472 DRM_DEBUG_KMS("Using SSC on eDP\n"); 9473 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9474 } else 9475 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9476 } else 9477 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9478 9479 I915_WRITE(PCH_DREF_CONTROL, val); 9480 POSTING_READ(PCH_DREF_CONTROL); 9481 udelay(200); 9482 } else { 9483 DRM_DEBUG_KMS("Disabling CPU source output\n"); 9484 9485 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9486 9487 /* Turn off CPU output */ 9488 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9489 9490 I915_WRITE(PCH_DREF_CONTROL, val); 9491 POSTING_READ(PCH_DREF_CONTROL); 9492 udelay(200); 9493 9494 if (!using_ssc_source) { 9495 DRM_DEBUG_KMS("Disabling SSC source\n"); 9496 9497 /* Turn off the SSC source */ 9498 val &= ~DREF_SSC_SOURCE_MASK; 9499 val |= DREF_SSC_SOURCE_DISABLE; 9500 9501 /* Turn off SSC1 */ 9502 val &= ~DREF_SSC1_ENABLE; 9503 9504 I915_WRITE(PCH_DREF_CONTROL, val); 9505 POSTING_READ(PCH_DREF_CONTROL); 9506 udelay(200); 9507 } 9508 } 9509 9510 BUG_ON(val != final); 9511 } 9512 9513 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 9514 { 9515 u32 tmp; 9516 9517 tmp = I915_READ(SOUTH_CHICKEN2); 9518 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 9519 I915_WRITE(SOUTH_CHICKEN2, tmp); 9520 9521 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & 9522 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 9523 DRM_ERROR("FDI mPHY reset assert timeout\n"); 9524 9525 tmp = I915_READ(SOUTH_CHICKEN2); 9526 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 9527 I915_WRITE(SOUTH_CHICKEN2, tmp); 9528 9529 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & 9530 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 9531 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 9532 } 9533 9534 /* WaMPhyProgramming:hsw */ 9535 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 9536 { 9537 u32 tmp; 9538 9539 tmp = intel_sbi_read(dev_priv, 
0x8008, SBI_MPHY); 9540 tmp &= ~(0xFF << 24); 9541 tmp |= (0x12 << 24); 9542 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 9543 9544 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 9545 tmp |= (1 << 11); 9546 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 9547 9548 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 9549 tmp |= (1 << 11); 9550 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 9551 9552 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 9553 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9554 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 9555 9556 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 9557 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9558 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 9559 9560 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 9561 tmp &= ~(7 << 13); 9562 tmp |= (5 << 13); 9563 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 9564 9565 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 9566 tmp &= ~(7 << 13); 9567 tmp |= (5 << 13); 9568 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 9569 9570 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 9571 tmp &= ~0xFF; 9572 tmp |= 0x1C; 9573 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 9574 9575 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 9576 tmp &= ~0xFF; 9577 tmp |= 0x1C; 9578 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 9579 9580 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 9581 tmp &= ~(0xFF << 16); 9582 tmp |= (0x1C << 16); 9583 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 9584 9585 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 9586 tmp &= ~(0xFF << 16); 9587 tmp |= (0x1C << 16); 9588 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 9589 9590 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 9591 tmp |= (1 << 27); 9592 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 9593 9594 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 9595 tmp |= (1 << 27); 9596 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 9597 9598 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 9599 tmp &= ~(0xF << 28); 9600 tmp |= (4 << 28); 9601 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 9602 9603 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 9604 tmp &= ~(0xF << 28); 9605 tmp |= (4 << 28); 9606 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 9607 } 9608 9609 /* Implements 3 different sequences from BSpec chapter "Display iCLK 9610 * Programming" based on the parameters passed: 9611 * - Sequence to enable CLKOUT_DP 9612 * - Sequence to enable CLKOUT_DP without spread 9613 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 9614 */ 9615 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 9616 bool with_spread, bool with_fdi) 9617 { 9618 u32 reg, tmp; 9619 9620 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 9621 with_spread = true; 9622 if (WARN(HAS_PCH_LPT_LP(dev_priv) && 9623 with_fdi, "LP PCH doesn't have FDI\n")) 9624 with_fdi = false; 9625 9626 mutex_lock(&dev_priv->sb_lock); 9627 9628 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9629 tmp &= ~SBI_SSCCTL_DISABLE; 9630 tmp |= SBI_SSCCTL_PATHALT; 9631 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9632 9633 udelay(24); 9634 9635 if (with_spread) { 9636 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9637 tmp &= ~SBI_SSCCTL_PATHALT; 9638 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9639 9640 if (with_fdi) { 9641 lpt_reset_fdi_mphy(dev_priv); 9642 lpt_program_fdi_mphy(dev_priv); 9643 } 9644 } 9645 9646 reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; 9647 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9648 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9649 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9650 9651 mutex_unlock(&dev_priv->sb_lock); 9652 } 9653 9654 /* Sequence to disable CLKOUT_DP */ 9655 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 9656 { 9657 u32 reg, tmp; 9658 9659 mutex_lock(&dev_priv->sb_lock); 9660 9661 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 9662 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9663 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9664 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9665 9666 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9667 if (!(tmp & SBI_SSCCTL_DISABLE)) { 9668 if (!(tmp & SBI_SSCCTL_PATHALT)) { 9669 tmp |= SBI_SSCCTL_PATHALT; 9670 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9671 udelay(32); 9672 } 9673 tmp |= SBI_SSCCTL_DISABLE; 9674 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9675 } 9676 9677 mutex_unlock(&dev_priv->sb_lock); 9678 } 9679 9680 #define BEND_IDX(steps) ((50 + (steps)) / 5) 9681 9682 static const u16 sscdivintphase[] = { 9683 [BEND_IDX( 50)] = 0x3B23, 9684 [BEND_IDX( 45)] = 0x3B23, 9685 [BEND_IDX( 40)] = 0x3C23, 9686 [BEND_IDX( 35)] = 0x3C23, 9687 [BEND_IDX( 30)] = 0x3D23, 9688 [BEND_IDX( 25)] = 0x3D23, 9689 [BEND_IDX( 20)] = 0x3E23, 9690 [BEND_IDX( 15)] = 0x3E23, 9691 [BEND_IDX( 10)] = 0x3F23, 9692 [BEND_IDX( 5)] = 0x3F23, 9693 [BEND_IDX( 0)] = 0x0025, 9694 [BEND_IDX( -5)] = 0x0025, 9695 [BEND_IDX(-10)] = 0x0125, 9696 [BEND_IDX(-15)] = 0x0125, 9697 [BEND_IDX(-20)] = 0x0225, 9698 [BEND_IDX(-25)] = 0x0225, 9699 [BEND_IDX(-30)] = 0x0325, 9700 [BEND_IDX(-35)] = 0x0325, 9701 [BEND_IDX(-40)] = 0x0425, 9702 [BEND_IDX(-45)] = 0x0425, 9703 [BEND_IDX(-50)] = 0x0525, 9704 }; 9705 9706 /* 9707 * Bend CLKOUT_DP 9708 * steps -50 to 50 inclusive, in steps of 5 9709 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 9710 * change in clock period = -(steps / 10) * 5.787 ps 9711 */ 9712 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 9713 { 9714 u32 tmp; 9715 int idx = BEND_IDX(steps); 9716 9717 if (WARN_ON(steps % 5 != 0)) 9718 return; 9719 9720 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) 9721 return; 9722 9723 mutex_lock(&dev_priv->sb_lock); 9724 9725 if (steps % 10 != 0) 9726 tmp = 0xAAAAAAAB; 9727 else 9728 tmp = 0x00000000; 9729 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 9730 9731 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 9732 tmp &= 0xffff0000; 9733 tmp |= sscdivintphase[idx]; 9734 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 9735 9736 mutex_unlock(&dev_priv->sb_lock); 9737 } 9738 9739 #undef BEND_IDX 9740 9741 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) 9742 { 9743 u32 fuse_strap = I915_READ(FUSE_STRAP); 9744 u32 ctl = I915_READ(SPLL_CTL); 9745 9746 if ((ctl & SPLL_PLL_ENABLE) == 0) 9747 return false; 9748 9749 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && 9750 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9751 return true; 9752 9753 if (IS_BROADWELL(dev_priv) && 9754 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) 9755 return true; 9756 9757 return false; 9758 } 9759 9760 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, 9761 enum intel_dpll_id id) 9762 { 9763 u32 fuse_strap = I915_READ(FUSE_STRAP); 9764 u32 ctl = I915_READ(WRPLL_CTL(id)); 9765 9766 if ((ctl & WRPLL_PLL_ENABLE) == 0) 9767 return false; 9768 9769 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) 9770 
return true; 9771 9772 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && 9773 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && 9774 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9775 return true; 9776 9777 return false; 9778 } 9779 9780 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 9781 { 9782 struct intel_encoder *encoder; 9783 bool has_fdi = false; 9784 9785 for_each_intel_encoder(&dev_priv->drm, encoder) { 9786 switch (encoder->type) { 9787 case INTEL_OUTPUT_ANALOG: 9788 has_fdi = true; 9789 break; 9790 default: 9791 break; 9792 } 9793 } 9794 9795 /* 9796 * The BIOS may have decided to use the PCH SSC 9797 * reference so we must not disable it until the 9798 * relevant PLLs have stopped relying on it. We'll 9799 * just leave the PCH SSC reference enabled in case 9800 * any active PLL is using it. It will get disabled 9801 * after runtime suspend if we don't have FDI. 9802 * 9803 * TODO: Move the whole reference clock handling 9804 * to the modeset sequence proper so that we can 9805 * actually enable/disable/reconfigure these things 9806 * safely. To do that we need to introduce a real 9807 * clock hierarchy. That would also allow us to do 9808 * clock bending finally. 9809 */ 9810 dev_priv->pch_ssc_use = 0; 9811 9812 if (spll_uses_pch_ssc(dev_priv)) { 9813 DRM_DEBUG_KMS("SPLL using PCH SSC\n"); 9814 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); 9815 } 9816 9817 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { 9818 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n"); 9819 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); 9820 } 9821 9822 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { 9823 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n"); 9824 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); 9825 } 9826 9827 if (dev_priv->pch_ssc_use) 9828 return; 9829 9830 if (has_fdi) { 9831 lpt_bend_clkout_dp(dev_priv, 0); 9832 lpt_enable_clkout_dp(dev_priv, true, true); 9833 } else { 9834 lpt_disable_clkout_dp(dev_priv); 9835 } 9836 } 9837 9838 /* 9839 * Initialize reference clocks when the driver loads 9840 */ 9841 void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 9842 { 9843 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 9844 ilk_init_pch_refclk(dev_priv); 9845 else if (HAS_PCH_LPT(dev_priv)) 9846 lpt_init_pch_refclk(dev_priv); 9847 } 9848 9849 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 9850 { 9851 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9852 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9853 enum pipe pipe = crtc->pipe; 9854 u32 val; 9855 9856 val = 0; 9857 9858 switch (crtc_state->pipe_bpp) { 9859 case 18: 9860 val |= PIPECONF_6BPC; 9861 break; 9862 case 24: 9863 val |= PIPECONF_8BPC; 9864 break; 9865 case 30: 9866 val |= PIPECONF_10BPC; 9867 break; 9868 case 36: 9869 val |= PIPECONF_12BPC; 9870 break; 9871 default: 9872 /* Case prevented by intel_choose_pipe_bpp_dither. */ 9873 BUG(); 9874 } 9875 9876 if (crtc_state->dither) 9877 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9878 9879 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9880 val |= PIPECONF_INTERLACED_ILK; 9881 else 9882 val |= PIPECONF_PROGRESSIVE; 9883 9884 /* 9885 * This would end up with an odd purple hue over 9886 * the entire display. Make sure we don't do it. 
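* (In other words, PIPECONF_COLOR_RANGE_SELECT may only be set while the pipe output format is RGB, which is exactly what the WARN below checks.)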
9887 */ 9888 WARN_ON(crtc_state->limited_color_range && 9889 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 9890 9891 if (crtc_state->limited_color_range) 9892 val |= PIPECONF_COLOR_RANGE_SELECT; 9893 9894 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 9895 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; 9896 9897 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 9898 9899 val |= PIPECONF_FRAME_START_DELAY(0); 9900 9901 I915_WRITE(PIPECONF(pipe), val); 9902 POSTING_READ(PIPECONF(pipe)); 9903 } 9904 9905 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state) 9906 { 9907 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9908 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9909 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 9910 u32 val = 0; 9911 9912 if (IS_HASWELL(dev_priv) && crtc_state->dither) 9913 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 9914 9915 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 9916 val |= PIPECONF_INTERLACED_ILK; 9917 else 9918 val |= PIPECONF_PROGRESSIVE; 9919 9920 if (IS_HASWELL(dev_priv) && 9921 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 9922 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; 9923 9924 I915_WRITE(PIPECONF(cpu_transcoder), val); 9925 POSTING_READ(PIPECONF(cpu_transcoder)); 9926 } 9927 9928 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) 9929 { 9930 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9931 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9932 u32 val = 0; 9933 9934 switch (crtc_state->pipe_bpp) { 9935 case 18: 9936 val |= PIPEMISC_DITHER_6_BPC; 9937 break; 9938 case 24: 9939 val |= PIPEMISC_DITHER_8_BPC; 9940 break; 9941 case 30: 9942 val |= PIPEMISC_DITHER_10_BPC; 9943 break; 9944 case 36: 9945 val |= PIPEMISC_DITHER_12_BPC; 9946 break; 9947 default: 9948 MISSING_CASE(crtc_state->pipe_bpp); 9949 break; 9950 } 9951 9952 if (crtc_state->dither) 9953 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 9954 9955 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 9956 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 9957 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; 9958 9959 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 9960 val |= PIPEMISC_YUV420_ENABLE | 9961 PIPEMISC_YUV420_MODE_FULL_BLEND; 9962 9963 if (INTEL_GEN(dev_priv) >= 11 && 9964 (crtc_state->active_planes & ~(icl_hdr_plane_mask() | 9965 BIT(PLANE_CURSOR))) == 0) 9966 val |= PIPEMISC_HDR_MODE_PRECISION; 9967 9968 I915_WRITE(PIPEMISC(crtc->pipe), val); 9969 } 9970 9971 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) 9972 { 9973 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9974 u32 tmp; 9975 9976 tmp = I915_READ(PIPEMISC(crtc->pipe)); 9977 9978 switch (tmp & PIPEMISC_DITHER_BPC_MASK) { 9979 case PIPEMISC_DITHER_6_BPC: 9980 return 18; 9981 case PIPEMISC_DITHER_8_BPC: 9982 return 24; 9983 case PIPEMISC_DITHER_10_BPC: 9984 return 30; 9985 case PIPEMISC_DITHER_12_BPC: 9986 return 36; 9987 default: 9988 MISSING_CASE(tmp); 9989 return 0; 9990 } 9991 } 9992 9993 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) 9994 { 9995 /* 9996 * Account for spread spectrum to avoid 9997 * oversubscribing the link. Max center spread 9998 * is 2.5%; use 5% for safety's sake. 
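* Worked example: a 148500 kHz mode at 24 bpp on a 270000 kHz link gives bps = 148500 * 24 * 21 / 20 = 3742200, and DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.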
9999 */ 10000 u32 bps = target_clock * bpp * 21 / 20; 10001 return DIV_ROUND_UP(bps, link_bw * 8); 10002 } 10003 10004 static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor) 10005 { 10006 return i9xx_dpll_compute_m(dpll) < factor * dpll->n; 10007 } 10008 10009 static void ilk_compute_dpll(struct intel_crtc *crtc, 10010 struct intel_crtc_state *crtc_state, 10011 struct dpll *reduced_clock) 10012 { 10013 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10014 u32 dpll, fp, fp2; 10015 int factor; 10016 10017 /* Enable autotuning of the PLL clock (if permissible) */ 10018 factor = 21; 10019 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 10020 if ((intel_panel_use_ssc(dev_priv) && 10021 dev_priv->vbt.lvds_ssc_freq == 100000) || 10022 (HAS_PCH_IBX(dev_priv) && 10023 intel_is_dual_link_lvds(dev_priv))) 10024 factor = 25; 10025 } else if (crtc_state->sdvo_tv_clock) { 10026 factor = 20; 10027 } 10028 10029 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 10030 10031 if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor)) 10032 fp |= FP_CB_TUNE; 10033 10034 if (reduced_clock) { 10035 fp2 = i9xx_dpll_compute_fp(reduced_clock); 10036 10037 if (reduced_clock->m < factor * reduced_clock->n) 10038 fp2 |= FP_CB_TUNE; 10039 } else { 10040 fp2 = fp; 10041 } 10042 10043 dpll = 0; 10044 10045 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 10046 dpll |= DPLLB_MODE_LVDS; 10047 else 10048 dpll |= DPLLB_MODE_DAC_SERIAL; 10049 10050 dpll |= (crtc_state->pixel_multiplier - 1) 10051 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 10052 10053 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 10054 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 10055 dpll |= DPLL_SDVO_HIGH_SPEED; 10056 10057 if (intel_crtc_has_dp_encoder(crtc_state)) 10058 dpll |= DPLL_SDVO_HIGH_SPEED; 10059 10060 /* 10061 * The high speed IO clock is only really required for 10062 * SDVO/HDMI/DP, but we also enable it for CRT to make it 10063 * possible to share the DPLL between CRT and HDMI. Enabling 10064 * the clock needlessly does no real harm, except use up a 10065 * bit of power potentially. 10066 * 10067 * We'll limit this to IVB with 3 pipes, since it has only two 10068 * DPLLs and so DPLL sharing is the only way to get three pipes 10069 * driving PCH ports at the same time. On SNB we could do this, 10070 * and potentially avoid enabling the second DPLL, but it's not 10071 * clear if it's a win or loss power-wise. No point in doing 10072 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
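* (Within this code path INTEL_NUM_PIPES() == 3 implies IVB, which is why the check below keys on the pipe count rather than on the platform.)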
10073 */ 10074 if (INTEL_NUM_PIPES(dev_priv) == 3 && 10075 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 10076 dpll |= DPLL_SDVO_HIGH_SPEED; 10077 10078 /* compute bitmask from p1 value */ 10079 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 10080 /* also FPA1 */ 10081 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 10082 10083 switch (crtc_state->dpll.p2) { 10084 case 5: 10085 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 10086 break; 10087 case 7: 10088 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 10089 break; 10090 case 10: 10091 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 10092 break; 10093 case 14: 10094 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 10095 break; 10096 } 10097 10098 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 10099 intel_panel_use_ssc(dev_priv)) 10100 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 10101 else 10102 dpll |= PLL_REF_INPUT_DREFCLK; 10103 10104 dpll |= DPLL_VCO_ENABLE; 10105 10106 crtc_state->dpll_hw_state.dpll = dpll; 10107 crtc_state->dpll_hw_state.fp0 = fp; 10108 crtc_state->dpll_hw_state.fp1 = fp2; 10109 } 10110 10111 static int ilk_crtc_compute_clock(struct intel_crtc *crtc, 10112 struct intel_crtc_state *crtc_state) 10113 { 10114 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10115 struct intel_atomic_state *state = 10116 to_intel_atomic_state(crtc_state->uapi.state); 10117 const struct intel_limit *limit; 10118 int refclk = 120000; 10119 10120 memset(&crtc_state->dpll_hw_state, 0, 10121 sizeof(crtc_state->dpll_hw_state)); 10122 10123 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 10124 if (!crtc_state->has_pch_encoder) 10125 return 0; 10126 10127 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 10128 if (intel_panel_use_ssc(dev_priv)) { 10129 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 10130 dev_priv->vbt.lvds_ssc_freq); 10131 refclk = dev_priv->vbt.lvds_ssc_freq; 10132 } 10133 10134 if (intel_is_dual_link_lvds(dev_priv)) { 10135 if (refclk == 100000) 10136 limit = &ilk_limits_dual_lvds_100m; 10137 else 10138 limit = &ilk_limits_dual_lvds; 10139 } else { 10140 if (refclk == 100000) 10141 limit = &ilk_limits_single_lvds_100m; 10142 else 10143 limit = &ilk_limits_single_lvds; 10144 } 10145 } else { 10146 limit = &ilk_limits_dac; 10147 } 10148 10149 if (!crtc_state->clock_set && 10150 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 10151 refclk, NULL, &crtc_state->dpll)) { 10152 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 10153 return -EINVAL; 10154 } 10155 10156 ilk_compute_dpll(crtc, crtc_state, NULL); 10157 10158 if (!intel_reserve_shared_dplls(state, crtc, NULL)) { 10159 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", 10160 pipe_name(crtc->pipe)); 10161 return -EINVAL; 10162 } 10163 10164 return 0; 10165 } 10166 10167 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 10168 struct intel_link_m_n *m_n) 10169 { 10170 struct drm_device *dev = crtc->base.dev; 10171 struct drm_i915_private *dev_priv = to_i915(dev); 10172 enum pipe pipe = crtc->pipe; 10173 10174 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 10175 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 10176 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 10177 & ~TU_SIZE_MASK; 10178 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 10179 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 10180 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10181 } 10182 10183 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 10184 enum transcoder transcoder, 10185 
struct intel_link_m_n *m_n, 10186 struct intel_link_m_n *m2_n2) 10187 { 10188 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10189 enum pipe pipe = crtc->pipe; 10190 10191 if (INTEL_GEN(dev_priv) >= 5) { 10192 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); 10193 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); 10194 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 10195 & ~TU_SIZE_MASK; 10196 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 10197 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 10198 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10199 10200 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { 10201 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 10202 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 10203 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 10204 & ~TU_SIZE_MASK; 10205 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 10206 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 10207 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10208 } 10209 } else { 10210 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 10211 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 10212 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 10213 & ~TU_SIZE_MASK; 10214 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 10215 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 10216 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10217 } 10218 } 10219 10220 void intel_dp_get_m_n(struct intel_crtc *crtc, 10221 struct intel_crtc_state *pipe_config) 10222 { 10223 if (pipe_config->has_pch_encoder) 10224 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 10225 else 10226 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 10227 &pipe_config->dp_m_n, 10228 &pipe_config->dp_m2_n2); 10229 } 10230 10231 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, 10232 struct intel_crtc_state *pipe_config) 10233 { 10234 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 10235 &pipe_config->fdi_m_n, NULL); 10236 } 10237 10238 static void skl_get_pfit_config(struct intel_crtc *crtc, 10239 struct intel_crtc_state *pipe_config) 10240 { 10241 struct drm_device *dev = crtc->base.dev; 10242 struct drm_i915_private *dev_priv = to_i915(dev); 10243 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 10244 u32 ps_ctrl = 0; 10245 int id = -1; 10246 int i; 10247 10248 /* find scaler attached to this pipe */ 10249 for (i = 0; i < crtc->num_scalers; i++) { 10250 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 10251 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 10252 id = i; 10253 pipe_config->pch_pfit.enabled = true; 10254 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); 10255 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); 10256 scaler_state->scalers[i].in_use = true; 10257 break; 10258 } 10259 } 10260 10261 scaler_state->scaler_id = id; 10262 if (id >= 0) { 10263 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 10264 } else { 10265 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 10266 } 10267 } 10268 10269 static void 10270 skl_get_initial_plane_config(struct intel_crtc *crtc, 10271 struct intel_initial_plane_config *plane_config) 10272 { 10273 struct drm_device *dev = crtc->base.dev; 10274 struct drm_i915_private *dev_priv = to_i915(dev); 10275 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 10276 enum plane_id plane_id = plane->id; 10277 enum pipe pipe; 10278 u32 val, base, offset, stride_mult, tiling, alpha; 10279 int fourcc, pixel_format; 10280 unsigned int 
aligned_height; 10281 struct drm_framebuffer *fb; 10282 struct intel_framebuffer *intel_fb; 10283 10284 if (!plane->get_hw_state(plane, &pipe)) 10285 return; 10286 10287 WARN_ON(pipe != crtc->pipe); 10288 10289 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 10290 if (!intel_fb) { 10291 DRM_DEBUG_KMS("failed to alloc fb\n"); 10292 return; 10293 } 10294 10295 fb = &intel_fb->base; 10296 10297 fb->dev = dev; 10298 10299 val = I915_READ(PLANE_CTL(pipe, plane_id)); 10300 10301 if (INTEL_GEN(dev_priv) >= 11) 10302 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK; 10303 else 10304 pixel_format = val & PLANE_CTL_FORMAT_MASK; 10305 10306 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { 10307 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id)); 10308 alpha &= PLANE_COLOR_ALPHA_MASK; 10309 } else { 10310 alpha = val & PLANE_CTL_ALPHA_MASK; 10311 } 10312 10313 fourcc = skl_format_to_fourcc(pixel_format, 10314 val & PLANE_CTL_ORDER_RGBX, alpha); 10315 fb->format = drm_format_info(fourcc); 10316 10317 tiling = val & PLANE_CTL_TILED_MASK; 10318 switch (tiling) { 10319 case PLANE_CTL_TILED_LINEAR: 10320 fb->modifier = DRM_FORMAT_MOD_LINEAR; 10321 break; 10322 case PLANE_CTL_TILED_X: 10323 plane_config->tiling = I915_TILING_X; 10324 fb->modifier = I915_FORMAT_MOD_X_TILED; 10325 break; 10326 case PLANE_CTL_TILED_Y: 10327 plane_config->tiling = I915_TILING_Y; 10328 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) 10329 fb->modifier = INTEL_GEN(dev_priv) >= 12 ? 10330 I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS : 10331 I915_FORMAT_MOD_Y_TILED_CCS; 10332 else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE) 10333 fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; 10334 else 10335 fb->modifier = I915_FORMAT_MOD_Y_TILED; 10336 break; 10337 case PLANE_CTL_TILED_YF: 10338 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) 10339 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; 10340 else 10341 fb->modifier = I915_FORMAT_MOD_Yf_TILED; 10342 break; 10343 default: 10344 MISSING_CASE(tiling); 10345 goto error; 10346 } 10347 10348 /* 10349 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr 10350 * while i915 HW rotation is clockwise, that's why the 90/270 cases below are swapped.
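* (E.g. a plane the hardware rotates by 90 degrees clockwise is reported as DRM_MODE_ROTATE_270; 0 and 180 degrees map to themselves.)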
10351 */ 10352 switch (val & PLANE_CTL_ROTATE_MASK) { 10353 case PLANE_CTL_ROTATE_0: 10354 plane_config->rotation = DRM_MODE_ROTATE_0; 10355 break; 10356 case PLANE_CTL_ROTATE_90: 10357 plane_config->rotation = DRM_MODE_ROTATE_270; 10358 break; 10359 case PLANE_CTL_ROTATE_180: 10360 plane_config->rotation = DRM_MODE_ROTATE_180; 10361 break; 10362 case PLANE_CTL_ROTATE_270: 10363 plane_config->rotation = DRM_MODE_ROTATE_90; 10364 break; 10365 } 10366 10367 if (INTEL_GEN(dev_priv) >= 10 && 10368 val & PLANE_CTL_FLIP_HORIZONTAL) 10369 plane_config->rotation |= DRM_MODE_REFLECT_X; 10370 10371 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; 10372 plane_config->base = base; 10373 10374 offset = I915_READ(PLANE_OFFSET(pipe, plane_id)); 10375 10376 val = I915_READ(PLANE_SIZE(pipe, plane_id)); 10377 fb->height = ((val >> 16) & 0xffff) + 1; 10378 fb->width = ((val >> 0) & 0xffff) + 1; 10379 10380 val = I915_READ(PLANE_STRIDE(pipe, plane_id)); 10381 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); 10382 fb->pitches[0] = (val & 0x3ff) * stride_mult; 10383 10384 aligned_height = intel_fb_align_height(fb, 0, fb->height); 10385 10386 plane_config->size = fb->pitches[0] * aligned_height; 10387 10388 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 10389 crtc->base.name, plane->base.name, fb->width, fb->height, 10390 fb->format->cpp[0] * 8, base, fb->pitches[0], 10391 plane_config->size); 10392 10393 plane_config->fb = intel_fb; 10394 return; 10395 10396 error: 10397 kfree(intel_fb); 10398 } 10399 10400 static void ilk_get_pfit_config(struct intel_crtc *crtc, 10401 struct intel_crtc_state *pipe_config) 10402 { 10403 struct drm_device *dev = crtc->base.dev; 10404 struct drm_i915_private *dev_priv = to_i915(dev); 10405 u32 tmp; 10406 10407 tmp = I915_READ(PF_CTL(crtc->pipe)); 10408 10409 if (tmp & PF_ENABLE) { 10410 pipe_config->pch_pfit.enabled = true; 10411 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); 10412 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); 10413 10414 /* We currently do not free assignments of panel fitters on 10415 * ivb/hsw (since we don't use the higher upscaling modes which 10416 * differentiate them) so just WARN about this case for now.
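* The gen7 check below only verifies that the enabled pfit is actually attached to our pipe; it makes no attempt to fix up a mismatch.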
*/ 10417 if (IS_GEN(dev_priv, 7)) { 10418 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 10419 PF_PIPE_SEL_IVB(crtc->pipe)); 10420 } 10421 } 10422 } 10423 10424 static bool ilk_get_pipe_config(struct intel_crtc *crtc, 10425 struct intel_crtc_state *pipe_config) 10426 { 10427 struct drm_device *dev = crtc->base.dev; 10428 struct drm_i915_private *dev_priv = to_i915(dev); 10429 enum intel_display_power_domain power_domain; 10430 intel_wakeref_t wakeref; 10431 u32 tmp; 10432 bool ret; 10433 10434 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10435 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 10436 if (!wakeref) 10437 return false; 10438 10439 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10440 pipe_config->shared_dpll = NULL; 10441 pipe_config->master_transcoder = INVALID_TRANSCODER; 10442 10443 ret = false; 10444 tmp = I915_READ(PIPECONF(crtc->pipe)); 10445 if (!(tmp & PIPECONF_ENABLE)) 10446 goto out; 10447 10448 switch (tmp & PIPECONF_BPC_MASK) { 10449 case PIPECONF_6BPC: 10450 pipe_config->pipe_bpp = 18; 10451 break; 10452 case PIPECONF_8BPC: 10453 pipe_config->pipe_bpp = 24; 10454 break; 10455 case PIPECONF_10BPC: 10456 pipe_config->pipe_bpp = 30; 10457 break; 10458 case PIPECONF_12BPC: 10459 pipe_config->pipe_bpp = 36; 10460 break; 10461 default: 10462 break; 10463 } 10464 10465 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 10466 pipe_config->limited_color_range = true; 10467 10468 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { 10469 case PIPECONF_OUTPUT_COLORSPACE_YUV601: 10470 case PIPECONF_OUTPUT_COLORSPACE_YUV709: 10471 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 10472 break; 10473 default: 10474 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 10475 break; 10476 } 10477 10478 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> 10479 PIPECONF_GAMMA_MODE_SHIFT; 10480 10481 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); 10482 10483 i9xx_get_pipe_color_config(pipe_config); 10484 intel_color_get_config(pipe_config); 10485 10486 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 10487 struct intel_shared_dpll *pll; 10488 enum intel_dpll_id pll_id; 10489 10490 pipe_config->has_pch_encoder = true; 10491 10492 tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); 10493 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10494 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10495 10496 ilk_get_fdi_m_n_config(crtc, pipe_config); 10497 10498 if (HAS_PCH_IBX(dev_priv)) { 10499 /* 10500 * The pipe->pch transcoder and pch transcoder->pll 10501 * mapping is fixed. 
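* On IBX pipe A always uses PCH DPLL A and pipe B PCH DPLL B, so the pipe number doubles as the DPLL id below.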
10502 */ 10503 pll_id = (enum intel_dpll_id) crtc->pipe; 10504 } else { 10505 tmp = I915_READ(PCH_DPLL_SEL); 10506 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 10507 pll_id = DPLL_ID_PCH_PLL_B; 10508 else 10509 pll_id = DPLL_ID_PCH_PLL_A; 10510 } 10511 10512 pipe_config->shared_dpll = 10513 intel_get_shared_dpll_by_id(dev_priv, pll_id); 10514 pll = pipe_config->shared_dpll; 10515 10516 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, 10517 &pipe_config->dpll_hw_state)); 10518 10519 tmp = pipe_config->dpll_hw_state.dpll; 10520 pipe_config->pixel_multiplier = 10521 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 10522 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 10523 10524 ilk_pch_clock_get(crtc, pipe_config); 10525 } else { 10526 pipe_config->pixel_multiplier = 1; 10527 } 10528 10529 intel_get_pipe_timings(crtc, pipe_config); 10530 intel_get_pipe_src_size(crtc, pipe_config); 10531 10532 ilk_get_pfit_config(crtc, pipe_config); 10533 10534 ret = true; 10535 10536 out: 10537 intel_display_power_put(dev_priv, power_domain, wakeref); 10538 10539 return ret; 10540 } 10541 10542 static int hsw_crtc_compute_clock(struct intel_crtc *crtc, 10543 struct intel_crtc_state *crtc_state) 10544 { 10545 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10546 struct intel_atomic_state *state = 10547 to_intel_atomic_state(crtc_state->uapi.state); 10548 10549 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) || 10550 INTEL_GEN(dev_priv) >= 11) { 10551 struct intel_encoder *encoder = 10552 intel_get_crtc_new_encoder(state, crtc_state); 10553 10554 if (!intel_reserve_shared_dplls(state, crtc, encoder)) { 10555 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", 10556 pipe_name(crtc->pipe)); 10557 return -EINVAL; 10558 } 10559 } 10560 10561 return 0; 10562 } 10563 10564 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, 10565 struct intel_crtc_state *pipe_config) 10566 { 10567 enum intel_dpll_id id; 10568 u32 temp; 10569 10570 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); 10571 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); 10572 10573 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2)) 10574 return; 10575 10576 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10577 } 10578 10579 static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, 10580 struct intel_crtc_state *pipe_config) 10581 { 10582 enum phy phy = intel_port_to_phy(dev_priv, port); 10583 enum icl_port_dpll_id port_dpll_id; 10584 enum intel_dpll_id id; 10585 u32 temp; 10586 10587 if (intel_phy_is_combo(dev_priv, phy)) { 10588 temp = I915_READ(ICL_DPCLKA_CFGCR0) & 10589 ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); 10590 id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); 10591 port_dpll_id = ICL_PORT_DPLL_DEFAULT; 10592 } else if (intel_phy_is_tc(dev_priv, phy)) { 10593 u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; 10594 10595 if (clk_sel == DDI_CLK_SEL_MG) { 10596 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, 10597 port)); 10598 port_dpll_id = ICL_PORT_DPLL_MG_PHY; 10599 } else { 10600 WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162); 10601 id = DPLL_ID_ICL_TBTPLL; 10602 port_dpll_id = ICL_PORT_DPLL_DEFAULT; 10603 } 10604 } else { 10605 WARN(1, "Invalid port %x\n", port); 10606 return; 10607 } 10608 10609 pipe_config->icl_port_dplls[port_dpll_id].pll = 10610 intel_get_shared_dpll_by_id(dev_priv, id); 10611 10612 icl_set_active_port_dpll(pipe_config, port_dpll_id); 10613 } 10614 10615 static void bxt_get_ddi_pll(struct drm_i915_private
*dev_priv, 10616 enum port port, 10617 struct intel_crtc_state *pipe_config) 10618 { 10619 enum intel_dpll_id id; 10620 10621 switch (port) { 10622 case PORT_A: 10623 id = DPLL_ID_SKL_DPLL0; 10624 break; 10625 case PORT_B: 10626 id = DPLL_ID_SKL_DPLL1; 10627 break; 10628 case PORT_C: 10629 id = DPLL_ID_SKL_DPLL2; 10630 break; 10631 default: 10632 DRM_ERROR("Incorrect port type\n"); 10633 return; 10634 } 10635 10636 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10637 } 10638 10639 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, 10640 struct intel_crtc_state *pipe_config) 10641 { 10642 enum intel_dpll_id id; 10643 u32 temp; 10644 10645 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 10646 id = temp >> (port * 3 + 1); 10647 10648 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3)) 10649 return; 10650 10651 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10652 } 10653 10654 static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, 10655 struct intel_crtc_state *pipe_config) 10656 { 10657 enum intel_dpll_id id; 10658 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 10659 10660 switch (ddi_pll_sel) { 10661 case PORT_CLK_SEL_WRPLL1: 10662 id = DPLL_ID_WRPLL1; 10663 break; 10664 case PORT_CLK_SEL_WRPLL2: 10665 id = DPLL_ID_WRPLL2; 10666 break; 10667 case PORT_CLK_SEL_SPLL: 10668 id = DPLL_ID_SPLL; 10669 break; 10670 case PORT_CLK_SEL_LCPLL_810: 10671 id = DPLL_ID_LCPLL_810; 10672 break; 10673 case PORT_CLK_SEL_LCPLL_1350: 10674 id = DPLL_ID_LCPLL_1350; 10675 break; 10676 case PORT_CLK_SEL_LCPLL_2700: 10677 id = DPLL_ID_LCPLL_2700; 10678 break; 10679 default: 10680 MISSING_CASE(ddi_pll_sel); 10681 /* fall through */ 10682 case PORT_CLK_SEL_NONE: 10683 return; 10684 } 10685 10686 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10687 } 10688 10689 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 10690 struct intel_crtc_state *pipe_config, 10691 u64 *power_domain_mask, 10692 intel_wakeref_t *wakerefs) 10693 { 10694 struct drm_device *dev = crtc->base.dev; 10695 struct drm_i915_private *dev_priv = to_i915(dev); 10696 enum intel_display_power_domain power_domain; 10697 unsigned long panel_transcoder_mask = 0; 10698 unsigned long enabled_panel_transcoders = 0; 10699 enum transcoder panel_transcoder; 10700 intel_wakeref_t wf; 10701 u32 tmp; 10702 10703 if (INTEL_GEN(dev_priv) >= 11) 10704 panel_transcoder_mask |= 10705 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 10706 10707 if (HAS_TRANSCODER_EDP(dev_priv)) 10708 panel_transcoder_mask |= BIT(TRANSCODER_EDP); 10709 10710 /* 10711 * The pipe->transcoder mapping is fixed with the exception of the eDP 10712 * and DSI transcoders handled below. 10713 */ 10714 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10715 10716 /* 10717 * XXX: Do intel_display_power_get_if_enabled before reading this (for 10718 * consistency and less surprising code; it's in always on power). 10719 */ 10720 for_each_set_bit(panel_transcoder, 10721 &panel_transcoder_mask, 10722 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) { 10723 bool force_thru = false; 10724 enum pipe trans_pipe; 10725 10726 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder)); 10727 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 10728 continue; 10729 10730 /* 10731 * Log all enabled ones, only use the first one. 10732 * 10733 * FIXME: This won't work for two separate DSI displays. 
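* (With two enabled DSI transcoders the loop below records both in enabled_panel_transcoders but only ever acts on the first one it encounters.)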
10734 */ 10735 enabled_panel_transcoders |= BIT(panel_transcoder); 10736 if (enabled_panel_transcoders != BIT(panel_transcoder)) 10737 continue; 10738 10739 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 10740 default: 10741 WARN(1, "unknown pipe linked to transcoder %s\n", 10742 transcoder_name(panel_transcoder)); 10743 /* fall through */ 10744 case TRANS_DDI_EDP_INPUT_A_ONOFF: 10745 force_thru = true; 10746 /* fall through */ 10747 case TRANS_DDI_EDP_INPUT_A_ON: 10748 trans_pipe = PIPE_A; 10749 break; 10750 case TRANS_DDI_EDP_INPUT_B_ONOFF: 10751 trans_pipe = PIPE_B; 10752 break; 10753 case TRANS_DDI_EDP_INPUT_C_ONOFF: 10754 trans_pipe = PIPE_C; 10755 break; 10756 case TRANS_DDI_EDP_INPUT_D_ONOFF: 10757 trans_pipe = PIPE_D; 10758 break; 10759 } 10760 10761 if (trans_pipe == crtc->pipe) { 10762 pipe_config->cpu_transcoder = panel_transcoder; 10763 pipe_config->pch_pfit.force_thru = force_thru; 10764 } 10765 } 10766 10767 /* 10768 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 10769 */ 10770 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && 10771 enabled_panel_transcoders != BIT(TRANSCODER_EDP)); 10772 10773 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 10774 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10775 10776 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10777 if (!wf) 10778 return false; 10779 10780 wakerefs[power_domain] = wf; 10781 *power_domain_mask |= BIT_ULL(power_domain); 10782 10783 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10784 10785 return tmp & PIPECONF_ENABLE; 10786 } 10787 10788 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 10789 struct intel_crtc_state *pipe_config, 10790 u64 *power_domain_mask, 10791 intel_wakeref_t *wakerefs) 10792 { 10793 struct drm_device *dev = crtc->base.dev; 10794 struct drm_i915_private *dev_priv = to_i915(dev); 10795 enum intel_display_power_domain power_domain; 10796 enum transcoder cpu_transcoder; 10797 intel_wakeref_t wf; 10798 enum port port; 10799 u32 tmp; 10800 10801 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 10802 if (port == PORT_A) 10803 cpu_transcoder = TRANSCODER_DSI_A; 10804 else 10805 cpu_transcoder = TRANSCODER_DSI_C; 10806 10807 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10808 WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); 10809 10810 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10811 if (!wf) 10812 continue; 10813 10814 wakerefs[power_domain] = wf; 10815 *power_domain_mask |= BIT_ULL(power_domain); 10816 10817 /* 10818 * The PLL needs to be enabled with a valid divider 10819 * configuration, otherwise accessing DSI registers will hang 10820 * the machine. See BSpec North Display Engine 10821 * registers/MIPI[BXT]. We can break out here early, since we 10822 * need the same DSI PLL to be enabled for both DSI ports. 
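* If the PLL is off while iterating port A it is necessarily off for port C too, hence the unconditional break below instead of a continue.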
10823 */ 10824 if (!bxt_dsi_pll_is_enabled(dev_priv)) 10825 break; 10826 10827 /* XXX: this works for video mode only */ 10828 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port)); 10829 if (!(tmp & DPI_ENABLE)) 10830 continue; 10831 10832 tmp = I915_READ(MIPI_CTRL(port)); 10833 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 10834 continue; 10835 10836 pipe_config->cpu_transcoder = cpu_transcoder; 10837 break; 10838 } 10839 10840 return transcoder_is_dsi(pipe_config->cpu_transcoder); 10841 } 10842 10843 static void hsw_get_ddi_port_state(struct intel_crtc *crtc, 10844 struct intel_crtc_state *pipe_config) 10845 { 10846 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10847 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 10848 struct intel_shared_dpll *pll; 10849 enum port port; 10850 u32 tmp; 10851 10852 if (transcoder_is_dsi(cpu_transcoder)) { 10853 port = (cpu_transcoder == TRANSCODER_DSI_A) ? 10854 PORT_A : PORT_B; 10855 } else { 10856 tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 10857 if (INTEL_GEN(dev_priv) >= 12) 10858 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 10859 else 10860 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 10861 } 10862 10863 if (INTEL_GEN(dev_priv) >= 11) 10864 icl_get_ddi_pll(dev_priv, port, pipe_config); 10865 else if (IS_CANNONLAKE(dev_priv)) 10866 cnl_get_ddi_pll(dev_priv, port, pipe_config); 10867 else if (IS_GEN9_BC(dev_priv)) 10868 skl_get_ddi_pll(dev_priv, port, pipe_config); 10869 else if (IS_GEN9_LP(dev_priv)) 10870 bxt_get_ddi_pll(dev_priv, port, pipe_config); 10871 else 10872 hsw_get_ddi_pll(dev_priv, port, pipe_config); 10873 10874 pll = pipe_config->shared_dpll; 10875 if (pll) { 10876 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, 10877 &pipe_config->dpll_hw_state)); 10878 } 10879 10880 /* 10881 * Haswell has only FDI/PCH transcoder A, which is connected to 10882 * DDI E. So just check whether this pipe is wired to DDI E and whether 10883 * the PCH transcoder is on.
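* (LPT has a single PCH transcoder, read out via LPT_TRANSCONF; gen9+ has no FDI at all, hence the INTEL_GEN < 9 guard below.)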
10884 */ 10885 if (INTEL_GEN(dev_priv) < 9 && 10886 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 10887 pipe_config->has_pch_encoder = true; 10888 10889 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 10890 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10891 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10892 10893 ilk_get_fdi_m_n_config(crtc, pipe_config); 10894 } 10895 } 10896 10897 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv, 10898 enum transcoder cpu_transcoder) 10899 { 10900 u32 trans_port_sync, master_select; 10901 10902 trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder)); 10903 10904 if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0) 10905 return INVALID_TRANSCODER; 10906 10907 master_select = trans_port_sync & 10908 PORT_SYNC_MODE_MASTER_SELECT_MASK; 10909 if (master_select == 0) 10910 return TRANSCODER_EDP; 10911 else 10912 return master_select - 1; 10913 } 10914 10915 static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) 10916 { 10917 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 10918 u32 transcoders; 10919 enum transcoder cpu_transcoder; 10920 10921 crtc_state->master_transcoder = transcoder_master_readout(dev_priv, 10922 crtc_state->cpu_transcoder); 10923 10924 transcoders = BIT(TRANSCODER_A) | 10925 BIT(TRANSCODER_B) | 10926 BIT(TRANSCODER_C) | 10927 BIT(TRANSCODER_D); 10928 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { 10929 enum intel_display_power_domain power_domain; 10930 intel_wakeref_t trans_wakeref; 10931 10932 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10933 trans_wakeref = intel_display_power_get_if_enabled(dev_priv, 10934 power_domain); 10935 10936 if (!trans_wakeref) 10937 continue; 10938 10939 if (transcoder_master_readout(dev_priv, cpu_transcoder) == 10940 crtc_state->cpu_transcoder) 10941 crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder); 10942 10943 intel_display_power_put(dev_priv, power_domain, trans_wakeref); 10944 } 10945 10946 WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER && 10947 crtc_state->sync_mode_slaves_mask); 10948 } 10949 10950 static bool hsw_get_pipe_config(struct intel_crtc *crtc, 10951 struct intel_crtc_state *pipe_config) 10952 { 10953 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10954 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf; 10955 enum intel_display_power_domain power_domain; 10956 u64 power_domain_mask; 10957 bool active; 10958 10959 pipe_config->master_transcoder = INVALID_TRANSCODER; 10960 10961 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10962 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10963 if (!wf) 10964 return false; 10965 10966 wakerefs[power_domain] = wf; 10967 power_domain_mask = BIT_ULL(power_domain); 10968 10969 pipe_config->shared_dpll = NULL; 10970 10971 active = hsw_get_transcoder_state(crtc, pipe_config, 10972 &power_domain_mask, wakerefs); 10973 10974 if (IS_GEN9_LP(dev_priv) && 10975 bxt_get_dsi_transcoder_state(crtc, pipe_config, 10976 &power_domain_mask, wakerefs)) { 10977 WARN_ON(active); 10978 active = true; 10979 } 10980 10981 if (!active) 10982 goto out; 10983 10984 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 10985 INTEL_GEN(dev_priv) >= 11) { 10986 hsw_get_ddi_port_state(crtc, pipe_config); 10987 intel_get_pipe_timings(crtc, pipe_config); 10988 } 10989 10990 intel_get_pipe_src_size(crtc, pipe_config); 10991 10992 if (IS_HASWELL(dev_priv)) { 10993 u32 tmp = 
I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10994 10995 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) 10996 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 10997 else 10998 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 10999 } else { 11000 pipe_config->output_format = 11001 bdw_get_pipemisc_output_format(crtc); 11002 11003 /* 11004 * Currently there is no interface defined to 11005 * check user preference between RGB/YCBCR444 11006 * or YCBCR420. So the only possible case for 11007 * YCBCR444 usage is driving YCBCR420 output 11008 * with LSPCON, when the pipe is configured for 11009 * YCBCR444 output and LSPCON takes care of 11010 * downsampling it. 11011 */ 11012 pipe_config->lspcon_downsampling = 11013 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444; 11014 } 11015 11016 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)); 11017 11018 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); 11019 11020 if (INTEL_GEN(dev_priv) >= 9) { 11021 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe)); 11022 11023 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) 11024 pipe_config->gamma_enable = true; 11025 11026 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) 11027 pipe_config->csc_enable = true; 11028 } else { 11029 i9xx_get_pipe_color_config(pipe_config); 11030 } 11031 11032 intel_color_get_config(pipe_config); 11033 11034 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 11035 WARN_ON(power_domain_mask & BIT_ULL(power_domain)); 11036 11037 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 11038 if (wf) { 11039 wakerefs[power_domain] = wf; 11040 power_domain_mask |= BIT_ULL(power_domain); 11041 11042 if (INTEL_GEN(dev_priv) >= 9) 11043 skl_get_pfit_config(crtc, pipe_config); 11044 else 11045 ilk_get_pfit_config(crtc, pipe_config); 11046 } 11047 11048 if (hsw_crtc_supports_ips(crtc)) { 11049 if (IS_HASWELL(dev_priv)) 11050 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE; 11051 else { 11052 /* 11053 * We cannot read out the IPS state on Broadwell; set it to 11054 * true so we can set it to a defined state on the first 11055 * commit.
11056 */ 11057 pipe_config->ips_enabled = true; 11058 } 11059 } 11060 11061 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 11062 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 11063 pipe_config->pixel_multiplier = 11064 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 11065 } else { 11066 pipe_config->pixel_multiplier = 1; 11067 } 11068 11069 if (INTEL_GEN(dev_priv) >= 11 && 11070 !transcoder_is_dsi(pipe_config->cpu_transcoder)) 11071 icl_get_trans_port_sync_config(pipe_config); 11072 11073 out: 11074 for_each_power_domain(power_domain, power_domain_mask) 11075 intel_display_power_put(dev_priv, 11076 power_domain, wakerefs[power_domain]); 11077 11078 return active; 11079 } 11080 11081 static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 11082 { 11083 struct drm_i915_private *dev_priv = 11084 to_i915(plane_state->uapi.plane->dev); 11085 const struct drm_framebuffer *fb = plane_state->hw.fb; 11086 const struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11087 u32 base; 11088 11089 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) 11090 base = sg_dma_address(obj->mm.pages->sgl); 11091 else 11092 base = intel_plane_ggtt_offset(plane_state); 11093 11094 return base + plane_state->color_plane[0].offset; 11095 } 11096 11097 static u32 intel_cursor_position(const struct intel_plane_state *plane_state) 11098 { 11099 int x = plane_state->uapi.dst.x1; 11100 int y = plane_state->uapi.dst.y1; 11101 u32 pos = 0; 11102 11103 if (x < 0) { 11104 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 11105 x = -x; 11106 } 11107 pos |= x << CURSOR_X_SHIFT; 11108 11109 if (y < 0) { 11110 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 11111 y = -y; 11112 } 11113 pos |= y << CURSOR_Y_SHIFT; 11114 11115 return pos; 11116 } 11117 11118 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) 11119 { 11120 const struct drm_mode_config *config = 11121 &plane_state->uapi.plane->dev->mode_config; 11122 int width = drm_rect_width(&plane_state->uapi.dst); 11123 int height = drm_rect_height(&plane_state->uapi.dst); 11124 11125 return width > 0 && width <= config->cursor_width && 11126 height > 0 && height <= config->cursor_height; 11127 } 11128 11129 static int intel_cursor_check_surface(struct intel_plane_state *plane_state) 11130 { 11131 struct drm_i915_private *dev_priv = 11132 to_i915(plane_state->uapi.plane->dev); 11133 unsigned int rotation = plane_state->hw.rotation; 11134 int src_x, src_y; 11135 u32 offset; 11136 int ret; 11137 11138 ret = intel_plane_compute_gtt(plane_state); 11139 if (ret) 11140 return ret; 11141 11142 if (!plane_state->uapi.visible) 11143 return 0; 11144 11145 src_x = plane_state->uapi.src.x1 >> 16; 11146 src_y = plane_state->uapi.src.y1 >> 16; 11147 11148 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 11149 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 11150 plane_state, 0); 11151 11152 if (src_x != 0 || src_y != 0) { 11153 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n"); 11154 return -EINVAL; 11155 } 11156 11157 /* 11158 * Put the final coordinates back so that the src 11159 * coordinate checks will see the right values. 
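 * (The uapi.src rectangle is in 16.16 fixed point, hence the << 16
 * when translating the integer coordinates back below.)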
11160 */ 11161 drm_rect_translate_to(&plane_state->uapi.src, 11162 src_x << 16, src_y << 16); 11163 11164 /* ILK+ do this automagically in hardware */ 11165 if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { 11166 const struct drm_framebuffer *fb = plane_state->hw.fb; 11167 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 11168 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 11169 11170 offset += (src_h * src_w - 1) * fb->format->cpp[0]; 11171 } 11172 11173 plane_state->color_plane[0].offset = offset; 11174 plane_state->color_plane[0].x = src_x; 11175 plane_state->color_plane[0].y = src_y; 11176 11177 return 0; 11178 } 11179 11180 static int intel_check_cursor(struct intel_crtc_state *crtc_state, 11181 struct intel_plane_state *plane_state) 11182 { 11183 const struct drm_framebuffer *fb = plane_state->hw.fb; 11184 int ret; 11185 11186 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { 11187 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 11188 return -EINVAL; 11189 } 11190 11191 ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, 11192 &crtc_state->uapi, 11193 DRM_PLANE_HELPER_NO_SCALING, 11194 DRM_PLANE_HELPER_NO_SCALING, 11195 true, true); 11196 if (ret) 11197 return ret; 11198 11199 /* Use the unclipped src/dst rectangles, which we program to hw */ 11200 plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi); 11201 plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi); 11202 11203 ret = intel_cursor_check_surface(plane_state); 11204 if (ret) 11205 return ret; 11206 11207 if (!plane_state->uapi.visible) 11208 return 0; 11209 11210 ret = intel_plane_check_src_coordinates(plane_state); 11211 if (ret) 11212 return ret; 11213 11214 return 0; 11215 } 11216 11217 static unsigned int 11218 i845_cursor_max_stride(struct intel_plane *plane, 11219 u32 pixel_format, u64 modifier, 11220 unsigned int rotation) 11221 { 11222 return 2048; 11223 } 11224 11225 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11226 { 11227 u32 cntl = 0; 11228 11229 if (crtc_state->gamma_enable) 11230 cntl |= CURSOR_GAMMA_ENABLE; 11231 11232 return cntl; 11233 } 11234 11235 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 11236 const struct intel_plane_state *plane_state) 11237 { 11238 return CURSOR_ENABLE | 11239 CURSOR_FORMAT_ARGB | 11240 CURSOR_STRIDE(plane_state->color_plane[0].stride); 11241 } 11242 11243 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 11244 { 11245 int width = drm_rect_width(&plane_state->uapi.dst); 11246 11247 /* 11248 * 845g/865g are only limited by the width of their cursors, 11249 * the height is arbitrary up to the precision of the register. 
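 * For illustration, assuming the dimensions fit within the mode_config
 * cursor limits checked by intel_cursor_size_ok(): 64x2 and 128x100
 * would be accepted, while 100x100 would be rejected because the
 * width is not a multiple of 64.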
11250 */ 11251 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 11252 } 11253 11254 static int i845_check_cursor(struct intel_crtc_state *crtc_state, 11255 struct intel_plane_state *plane_state) 11256 { 11257 const struct drm_framebuffer *fb = plane_state->hw.fb; 11258 int ret; 11259 11260 ret = intel_check_cursor(crtc_state, plane_state); 11261 if (ret) 11262 return ret; 11263 11264 /* if we want to turn off the cursor ignore width and height */ 11265 if (!fb) 11266 return 0; 11267 11268 /* Check for which cursor types we support */ 11269 if (!i845_cursor_size_ok(plane_state)) { 11270 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 11271 drm_rect_width(&plane_state->uapi.dst), 11272 drm_rect_height(&plane_state->uapi.dst)); 11273 return -EINVAL; 11274 } 11275 11276 WARN_ON(plane_state->uapi.visible && 11277 plane_state->color_plane[0].stride != fb->pitches[0]); 11278 11279 switch (fb->pitches[0]) { 11280 case 256: 11281 case 512: 11282 case 1024: 11283 case 2048: 11284 break; 11285 default: 11286 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n", 11287 fb->pitches[0]); 11288 return -EINVAL; 11289 } 11290 11291 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 11292 11293 return 0; 11294 } 11295 11296 static void i845_update_cursor(struct intel_plane *plane, 11297 const struct intel_crtc_state *crtc_state, 11298 const struct intel_plane_state *plane_state) 11299 { 11300 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11301 u32 cntl = 0, base = 0, pos = 0, size = 0; 11302 unsigned long irqflags; 11303 11304 if (plane_state && plane_state->uapi.visible) { 11305 unsigned int width = drm_rect_width(&plane_state->uapi.dst); 11306 unsigned int height = drm_rect_height(&plane_state->uapi.dst); 11307 11308 cntl = plane_state->ctl | 11309 i845_cursor_ctl_crtc(crtc_state); 11310 11311 size = (height << 12) | width; 11312 11313 base = intel_cursor_base(plane_state); 11314 pos = intel_cursor_position(plane_state); 11315 } 11316 11317 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11318 11319 /* On these chipsets we can only modify the base/size/stride 11320 * whilst the cursor is disabled. 
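 * Hence the full update below first disables the cursor (CURCNTR = 0),
 * rewrites base/size/position, and only then re-enables it with the
 * new control value; a plain movement needs just the CURPOS write.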
11321 */ 11322 if (plane->cursor.base != base || 11323 plane->cursor.size != size || 11324 plane->cursor.cntl != cntl) { 11325 I915_WRITE_FW(CURCNTR(PIPE_A), 0); 11326 I915_WRITE_FW(CURBASE(PIPE_A), base); 11327 I915_WRITE_FW(CURSIZE, size); 11328 I915_WRITE_FW(CURPOS(PIPE_A), pos); 11329 I915_WRITE_FW(CURCNTR(PIPE_A), cntl); 11330 11331 plane->cursor.base = base; 11332 plane->cursor.size = size; 11333 plane->cursor.cntl = cntl; 11334 } else { 11335 I915_WRITE_FW(CURPOS(PIPE_A), pos); 11336 } 11337 11338 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11339 } 11340 11341 static void i845_disable_cursor(struct intel_plane *plane, 11342 const struct intel_crtc_state *crtc_state) 11343 { 11344 i845_update_cursor(plane, crtc_state, NULL); 11345 } 11346 11347 static bool i845_cursor_get_hw_state(struct intel_plane *plane, 11348 enum pipe *pipe) 11349 { 11350 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11351 enum intel_display_power_domain power_domain; 11352 intel_wakeref_t wakeref; 11353 bool ret; 11354 11355 power_domain = POWER_DOMAIN_PIPE(PIPE_A); 11356 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11357 if (!wakeref) 11358 return false; 11359 11360 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 11361 11362 *pipe = PIPE_A; 11363 11364 intel_display_power_put(dev_priv, power_domain, wakeref); 11365 11366 return ret; 11367 } 11368 11369 static unsigned int 11370 i9xx_cursor_max_stride(struct intel_plane *plane, 11371 u32 pixel_format, u64 modifier, 11372 unsigned int rotation) 11373 { 11374 return plane->base.dev->mode_config.cursor_width * 4; 11375 } 11376 11377 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11378 { 11379 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 11380 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11381 u32 cntl = 0; 11382 11383 if (INTEL_GEN(dev_priv) >= 11) 11384 return cntl; 11385 11386 if (crtc_state->gamma_enable) 11387 cntl = MCURSOR_GAMMA_ENABLE; 11388 11389 if (crtc_state->csc_enable) 11390 cntl |= MCURSOR_PIPE_CSC_ENABLE; 11391 11392 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 11393 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 11394 11395 return cntl; 11396 } 11397 11398 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 11399 const struct intel_plane_state *plane_state) 11400 { 11401 struct drm_i915_private *dev_priv = 11402 to_i915(plane_state->uapi.plane->dev); 11403 u32 cntl = 0; 11404 11405 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 11406 cntl |= MCURSOR_TRICKLE_FEED_DISABLE; 11407 11408 switch (drm_rect_width(&plane_state->uapi.dst)) { 11409 case 64: 11410 cntl |= MCURSOR_MODE_64_ARGB_AX; 11411 break; 11412 case 128: 11413 cntl |= MCURSOR_MODE_128_ARGB_AX; 11414 break; 11415 case 256: 11416 cntl |= MCURSOR_MODE_256_ARGB_AX; 11417 break; 11418 default: 11419 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst)); 11420 return 0; 11421 } 11422 11423 if (plane_state->hw.rotation & DRM_MODE_ROTATE_180) 11424 cntl |= MCURSOR_ROTATE_180; 11425 11426 return cntl; 11427 } 11428 11429 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) 11430 { 11431 struct drm_i915_private *dev_priv = 11432 to_i915(plane_state->uapi.plane->dev); 11433 int width = drm_rect_width(&plane_state->uapi.dst); 11434 int height = drm_rect_height(&plane_state->uapi.dst); 11435 11436 if (!intel_cursor_size_ok(plane_state)) 11437 return false; 11438 11439 /* Cursor width is limited to a few power-of-two sizes */ 11440 switch 
(width) { 11441 case 256: 11442 case 128: 11443 case 64: 11444 break; 11445 default: 11446 return false; 11447 } 11448 11449 /* 11450 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor 11451 * height from 8 lines up to the cursor width, when the 11452 * cursor is not rotated. Everything else requires square 11453 * cursors. 11454 */ 11455 if (HAS_CUR_FBC(dev_priv) && 11456 plane_state->hw.rotation & DRM_MODE_ROTATE_0) { 11457 if (height < 8 || height > width) 11458 return false; 11459 } else { 11460 if (height != width) 11461 return false; 11462 } 11463 11464 return true; 11465 } 11466 11467 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, 11468 struct intel_plane_state *plane_state) 11469 { 11470 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 11471 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11472 const struct drm_framebuffer *fb = plane_state->hw.fb; 11473 enum pipe pipe = plane->pipe; 11474 int ret; 11475 11476 ret = intel_check_cursor(crtc_state, plane_state); 11477 if (ret) 11478 return ret; 11479 11480 /* if we want to turn off the cursor ignore width and height */ 11481 if (!fb) 11482 return 0; 11483 11484 /* Check for which cursor types we support */ 11485 if (!i9xx_cursor_size_ok(plane_state)) { 11486 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 11487 drm_rect_width(&plane_state->uapi.dst), 11488 drm_rect_height(&plane_state->uapi.dst)); 11489 return -EINVAL; 11490 } 11491 11492 WARN_ON(plane_state->uapi.visible && 11493 plane_state->color_plane[0].stride != fb->pitches[0]); 11494 11495 if (fb->pitches[0] != 11496 drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { 11497 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n", 11498 fb->pitches[0], 11499 drm_rect_width(&plane_state->uapi.dst)); 11500 return -EINVAL; 11501 } 11502 11503 /* 11504 * There's something wrong with the cursor on CHV pipe C. 11505 * If it straddles the left edge of the screen then 11506 * moving it away from the edge or disabling it often 11507 * results in a pipe underrun, and often that can lead to 11508 * a dead pipe (constant underrun reported, and it scans 11509 * out just a solid color). To recover from that, the 11510 * display power well must be turned off and on again. 11511 * Refuse to put the cursor into that compromised position.
11512 */ 11513 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && 11514 plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) { 11515 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); 11516 return -EINVAL; 11517 } 11518 11519 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); 11520 11521 return 0; 11522 } 11523 11524 static void i9xx_update_cursor(struct intel_plane *plane, 11525 const struct intel_crtc_state *crtc_state, 11526 const struct intel_plane_state *plane_state) 11527 { 11528 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11529 enum pipe pipe = plane->pipe; 11530 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; 11531 unsigned long irqflags; 11532 11533 if (plane_state && plane_state->uapi.visible) { 11534 unsigned width = drm_rect_width(&plane_state->uapi.dst); 11535 unsigned height = drm_rect_height(&plane_state->uapi.dst); 11536 11537 cntl = plane_state->ctl | 11538 i9xx_cursor_ctl_crtc(crtc_state); 11539 11540 if (width != height) 11541 fbc_ctl = CUR_FBC_CTL_EN | (height - 1); 11542 11543 base = intel_cursor_base(plane_state); 11544 pos = intel_cursor_position(plane_state); 11545 } 11546 11547 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11548 11549 /* 11550 * On some platforms writing CURCNTR first will also 11551 * cause CURPOS to be armed by the CURBASE write. 11552 * Without the CURCNTR write the CURPOS write would 11553 * arm itself. Thus we always update CURCNTR before 11554 * CURPOS. 11555 * 11556 * On other platforms CURPOS always requires the 11557 * CURBASE write to arm the update. Additionally 11558 * a write to any of the cursor registers will cancel 11559 * an already armed cursor update. Thus leaving out 11560 * the CURBASE write after CURPOS could lead to a 11561 * cursor that doesn't appear to move, or even change 11562 * shape. Thus we always write CURBASE. 11563 * 11564 * The other registers are armed by the CURBASE write 11565 * except when the plane is getting enabled at which time 11566 * the CURCNTR write arms the update. 11567 */ 11568 11569 if (INTEL_GEN(dev_priv) >= 9) 11570 skl_write_cursor_wm(plane, crtc_state); 11571 11572 if (plane->cursor.base != base || 11573 plane->cursor.size != fbc_ctl || 11574 plane->cursor.cntl != cntl) { 11575 if (HAS_CUR_FBC(dev_priv)) 11576 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl); 11577 I915_WRITE_FW(CURCNTR(pipe), cntl); 11578 I915_WRITE_FW(CURPOS(pipe), pos); 11579 I915_WRITE_FW(CURBASE(pipe), base); 11580 11581 plane->cursor.base = base; 11582 plane->cursor.size = fbc_ctl; 11583 plane->cursor.cntl = cntl; 11584 } else { 11585 I915_WRITE_FW(CURPOS(pipe), pos); 11586 I915_WRITE_FW(CURBASE(pipe), base); 11587 } 11588 11589 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11590 } 11591 11592 static void i9xx_disable_cursor(struct intel_plane *plane, 11593 const struct intel_crtc_state *crtc_state) 11594 { 11595 i9xx_update_cursor(plane, crtc_state, NULL); 11596 } 11597 11598 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, 11599 enum pipe *pipe) 11600 { 11601 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11602 enum intel_display_power_domain power_domain; 11603 intel_wakeref_t wakeref; 11604 bool ret; 11605 u32 val; 11606 11607 /* 11608 * Not 100% correct for planes that can move between pipes, 11609 * but that's only the case for gen2-3 which don't have any 11610 * display power wells.
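 * On those older platforms the pipe the cursor is attached to is
 * instead recovered from the MCURSOR_PIPE_SELECT bits read back below.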
11611 */ 11612 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 11613 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11614 if (!wakeref) 11615 return false; 11616 11617 val = I915_READ(CURCNTR(plane->pipe)); 11618 11619 ret = val & MCURSOR_MODE; 11620 11621 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 11622 *pipe = plane->pipe; 11623 else 11624 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> 11625 MCURSOR_PIPE_SELECT_SHIFT; 11626 11627 intel_display_power_put(dev_priv, power_domain, wakeref); 11628 11629 return ret; 11630 } 11631 11632 /* VESA 640x480x72Hz mode to set on the pipe */ 11633 static const struct drm_display_mode load_detect_mode = { 11634 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 11635 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 11636 }; 11637 11638 struct drm_framebuffer * 11639 intel_framebuffer_create(struct drm_i915_gem_object *obj, 11640 struct drm_mode_fb_cmd2 *mode_cmd) 11641 { 11642 struct intel_framebuffer *intel_fb; 11643 int ret; 11644 11645 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 11646 if (!intel_fb) 11647 return ERR_PTR(-ENOMEM); 11648 11649 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); 11650 if (ret) 11651 goto err; 11652 11653 return &intel_fb->base; 11654 11655 err: 11656 kfree(intel_fb); 11657 return ERR_PTR(ret); 11658 } 11659 11660 static int intel_modeset_disable_planes(struct drm_atomic_state *state, 11661 struct drm_crtc *crtc) 11662 { 11663 struct drm_plane *plane; 11664 struct drm_plane_state *plane_state; 11665 int ret, i; 11666 11667 ret = drm_atomic_add_affected_planes(state, crtc); 11668 if (ret) 11669 return ret; 11670 11671 for_each_new_plane_in_state(state, plane, plane_state, i) { 11672 if (plane_state->crtc != crtc) 11673 continue; 11674 11675 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 11676 if (ret) 11677 return ret; 11678 11679 drm_atomic_set_fb_for_plane(plane_state, NULL); 11680 } 11681 11682 return 0; 11683 } 11684 11685 int intel_get_load_detect_pipe(struct drm_connector *connector, 11686 struct intel_load_detect_pipe *old, 11687 struct drm_modeset_acquire_ctx *ctx) 11688 { 11689 struct intel_crtc *intel_crtc; 11690 struct intel_encoder *intel_encoder = 11691 intel_attached_encoder(to_intel_connector(connector)); 11692 struct drm_crtc *possible_crtc; 11693 struct drm_encoder *encoder = &intel_encoder->base; 11694 struct drm_crtc *crtc = NULL; 11695 struct drm_device *dev = encoder->dev; 11696 struct drm_i915_private *dev_priv = to_i915(dev); 11697 struct drm_mode_config *config = &dev->mode_config; 11698 struct drm_atomic_state *state = NULL, *restore_state = NULL; 11699 struct drm_connector_state *connector_state; 11700 struct intel_crtc_state *crtc_state; 11701 int ret, i = -1; 11702 11703 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11704 connector->base.id, connector->name, 11705 encoder->base.id, encoder->name); 11706 11707 old->restore_state = NULL; 11708 11709 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 11710 11711 /* 11712 * Algorithm gets a little messy: 11713 * 11714 * - if the connector already has an assigned crtc, use it (but make 11715 * sure it's on first) 11716 * 11717 * - try to find the first unused crtc that can drive this connector, 11718 * and use that if we find one 11719 */ 11720 11721 /* See if we already have a CRTC for this connector */ 11722 if (connector->state->crtc) { 11723 crtc = connector->state->crtc; 11724 11725 ret = drm_modeset_lock(&crtc->mutex, ctx); 11726 if (ret) 
11727 goto fail; 11728 11729 /* Make sure the crtc and connector are running */ 11730 goto found; 11731 } 11732 11733 /* Find an unused one (if possible) */ 11734 for_each_crtc(dev, possible_crtc) { 11735 i++; 11736 if (!(encoder->possible_crtcs & (1 << i))) 11737 continue; 11738 11739 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 11740 if (ret) 11741 goto fail; 11742 11743 if (possible_crtc->state->enable) { 11744 drm_modeset_unlock(&possible_crtc->mutex); 11745 continue; 11746 } 11747 11748 crtc = possible_crtc; 11749 break; 11750 } 11751 11752 /* 11753 * If we didn't find an unused CRTC, don't use any. 11754 */ 11755 if (!crtc) { 11756 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 11757 ret = -ENODEV; 11758 goto fail; 11759 } 11760 11761 found: 11762 intel_crtc = to_intel_crtc(crtc); 11763 11764 state = drm_atomic_state_alloc(dev); 11765 restore_state = drm_atomic_state_alloc(dev); 11766 if (!state || !restore_state) { 11767 ret = -ENOMEM; 11768 goto fail; 11769 } 11770 11771 state->acquire_ctx = ctx; 11772 restore_state->acquire_ctx = ctx; 11773 11774 connector_state = drm_atomic_get_connector_state(state, connector); 11775 if (IS_ERR(connector_state)) { 11776 ret = PTR_ERR(connector_state); 11777 goto fail; 11778 } 11779 11780 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 11781 if (ret) 11782 goto fail; 11783 11784 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 11785 if (IS_ERR(crtc_state)) { 11786 ret = PTR_ERR(crtc_state); 11787 goto fail; 11788 } 11789 11790 crtc_state->uapi.active = true; 11791 11792 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi, 11793 &load_detect_mode); 11794 if (ret) 11795 goto fail; 11796 11797 ret = intel_modeset_disable_planes(state, crtc); 11798 if (ret) 11799 goto fail; 11800 11801 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 11802 if (!ret) 11803 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 11804 if (!ret) 11805 ret = drm_atomic_add_affected_planes(restore_state, crtc); 11806 if (ret) { 11807 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); 11808 goto fail; 11809 } 11810 11811 ret = drm_atomic_commit(state); 11812 if (ret) { 11813 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 11814 goto fail; 11815 } 11816 11817 old->restore_state = restore_state; 11818 drm_atomic_state_put(state); 11819 11820 /* let the connector get through one full cycle before testing */ 11821 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 11822 return true; 11823 11824 fail: 11825 if (state) { 11826 drm_atomic_state_put(state); 11827 state = NULL; 11828 } 11829 if (restore_state) { 11830 drm_atomic_state_put(restore_state); 11831 restore_state = NULL; 11832 } 11833 11834 if (ret == -EDEADLK) 11835 return ret; 11836 11837 return false; 11838 } 11839 11840 void intel_release_load_detect_pipe(struct drm_connector *connector, 11841 struct intel_load_detect_pipe *old, 11842 struct drm_modeset_acquire_ctx *ctx) 11843 { 11844 struct intel_encoder *intel_encoder = 11845 intel_attached_encoder(to_intel_connector(connector)); 11846 struct drm_encoder *encoder = &intel_encoder->base; 11847 struct drm_atomic_state *state = old->restore_state; 11848 int ret; 11849 11850 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11851 connector->base.id, connector->name, 11852 encoder->base.id, encoder->name); 11853 11854 if (!state) 11855 return; 11856 11857 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 11858 if (ret) 11859 
DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret); 11860 drm_atomic_state_put(state); 11861 } 11862 11863 static int i9xx_pll_refclk(struct drm_device *dev, 11864 const struct intel_crtc_state *pipe_config) 11865 { 11866 struct drm_i915_private *dev_priv = to_i915(dev); 11867 u32 dpll = pipe_config->dpll_hw_state.dpll; 11868 11869 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 11870 return dev_priv->vbt.lvds_ssc_freq; 11871 else if (HAS_PCH_SPLIT(dev_priv)) 11872 return 120000; 11873 else if (!IS_GEN(dev_priv, 2)) 11874 return 96000; 11875 else 11876 return 48000; 11877 } 11878 11879 /* Returns the clock of the currently programmed mode of the given pipe. */ 11880 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 11881 struct intel_crtc_state *pipe_config) 11882 { 11883 struct drm_device *dev = crtc->base.dev; 11884 struct drm_i915_private *dev_priv = to_i915(dev); 11885 enum pipe pipe = crtc->pipe; 11886 u32 dpll = pipe_config->dpll_hw_state.dpll; 11887 u32 fp; 11888 struct dpll clock; 11889 int port_clock; 11890 int refclk = i9xx_pll_refclk(dev, pipe_config); 11891 11892 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 11893 fp = pipe_config->dpll_hw_state.fp0; 11894 else 11895 fp = pipe_config->dpll_hw_state.fp1; 11896 11897 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 11898 if (IS_PINEVIEW(dev_priv)) { 11899 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 11900 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 11901 } else { 11902 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 11903 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 11904 } 11905 11906 if (!IS_GEN(dev_priv, 2)) { 11907 if (IS_PINEVIEW(dev_priv)) 11908 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 11909 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 11910 else 11911 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 11912 DPLL_FPA01_P1_POST_DIV_SHIFT); 11913 11914 switch (dpll & DPLL_MODE_MASK) { 11915 case DPLLB_MODE_DAC_SERIAL: 11916 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 11917 5 : 10; 11918 break; 11919 case DPLLB_MODE_LVDS: 11920 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 11921 7 : 14; 11922 break; 11923 default: 11924 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 11925 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 11926 return; 11927 } 11928 11929 if (IS_PINEVIEW(dev_priv)) 11930 port_clock = pnv_calc_dpll_params(refclk, &clock); 11931 else 11932 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11933 } else { 11934 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS); 11935 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 11936 11937 if (is_lvds) { 11938 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 11939 DPLL_FPA01_P1_POST_DIV_SHIFT); 11940 11941 if (lvds & LVDS_CLKB_POWER_UP) 11942 clock.p2 = 7; 11943 else 11944 clock.p2 = 14; 11945 } else { 11946 if (dpll & PLL_P1_DIVIDE_BY_TWO) 11947 clock.p1 = 2; 11948 else { 11949 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 11950 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 11951 } 11952 if (dpll & PLL_P2_DIVIDE_BY_4) 11953 clock.p2 = 4; 11954 else 11955 clock.p2 = 2; 11956 } 11957 11958 port_clock = i9xx_calc_dpll_params(refclk, &clock); 11959 } 11960 11961 /* 11962 * This value includes pixel_multiplier. We will use 11963 * port_clock to compute adjusted_mode.crtc_clock in the 11964 * encoder's get_config() function. 
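 * (Loosely, the encoder will derive
 * adjusted_mode.crtc_clock = port_clock / pixel_multiplier.)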
11965 */ 11966 pipe_config->port_clock = port_clock; 11967 } 11968 11969 int intel_dotclock_calculate(int link_freq, 11970 const struct intel_link_m_n *m_n) 11971 { 11972 /* 11973 * The calculation for the data clock is: 11974 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 11975 * But we want to avoid losing precision if possible, so: 11976 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 11977 * 11978 * and the dot clock from the link m/n pair is simpler: 11979 * dot_clock = (m * link_clock) / n 11980 */ 11981 11982 if (!m_n->link_n) 11983 return 0; 11984 11985 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); 11986 } 11987 11988 static void ilk_pch_clock_get(struct intel_crtc *crtc, 11989 struct intel_crtc_state *pipe_config) 11990 { 11991 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11992 11993 /* read out port_clock from the DPLL */ 11994 i9xx_crtc_clock_get(crtc, pipe_config); 11995 11996 /* 11997 * In case there is an active pipe without active ports, 11998 * we still need some idea of the dotclock anyway. 11999 * Calculate one based on the FDI configuration. 12000 */ 12001 pipe_config->hw.adjusted_mode.crtc_clock = 12002 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 12003 &pipe_config->fdi_m_n); 12004 } 12005 12006 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, 12007 struct intel_crtc *crtc) 12008 { 12009 memset(crtc_state, 0, sizeof(*crtc_state)); 12010 12011 __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base); 12012 12013 crtc_state->cpu_transcoder = INVALID_TRANSCODER; 12014 crtc_state->master_transcoder = INVALID_TRANSCODER; 12015 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 12016 crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID; 12017 crtc_state->scaler_state.scaler_id = -1; 12018 crtc_state->mst_master_transcoder = INVALID_TRANSCODER; 12019 } 12020 12021 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc) 12022 { 12023 struct intel_crtc_state *crtc_state; 12024 12025 crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL); 12026 12027 if (crtc_state) 12028 intel_crtc_state_reset(crtc_state, crtc); 12029 12030 return crtc_state; 12031 } 12032 12033 /* Returns the currently programmed mode of the given encoder.
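 * The mode is kzalloc()ed here, so the caller is expected to kfree() it
 * when done.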
*/ 12034 struct drm_display_mode * 12035 intel_encoder_current_mode(struct intel_encoder *encoder) 12036 { 12037 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 12038 struct intel_crtc_state *crtc_state; 12039 struct drm_display_mode *mode; 12040 struct intel_crtc *crtc; 12041 enum pipe pipe; 12042 12043 if (!encoder->get_hw_state(encoder, &pipe)) 12044 return NULL; 12045 12046 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 12047 12048 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 12049 if (!mode) 12050 return NULL; 12051 12052 crtc_state = intel_crtc_state_alloc(crtc); 12053 if (!crtc_state) { 12054 kfree(mode); 12055 return NULL; 12056 } 12057 12058 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) { 12059 kfree(crtc_state); 12060 kfree(mode); 12061 return NULL; 12062 } 12063 12064 encoder->get_config(encoder, crtc_state); 12065 12066 intel_mode_from_pipe_config(mode, crtc_state); 12067 12068 kfree(crtc_state); 12069 12070 return mode; 12071 } 12072 12073 static void intel_crtc_destroy(struct drm_crtc *crtc) 12074 { 12075 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12076 12077 drm_crtc_cleanup(crtc); 12078 kfree(intel_crtc); 12079 } 12080 12081 /** 12082 * intel_wm_need_update - Check whether watermarks need updating 12083 * @cur: current plane state 12084 * @new: new plane state 12085 * 12086 * Check current plane state versus the new one to determine whether 12087 * watermarks need to be recalculated. 12088 * 12089 * Returns true or false. 12090 */ 12091 static bool intel_wm_need_update(const struct intel_plane_state *cur, 12092 struct intel_plane_state *new) 12093 { 12094 /* Update watermarks on tiling or size changes. */ 12095 if (new->uapi.visible != cur->uapi.visible) 12096 return true; 12097 12098 if (!cur->hw.fb || !new->hw.fb) 12099 return false; 12100 12101 if (cur->hw.fb->modifier != new->hw.fb->modifier || 12102 cur->hw.rotation != new->hw.rotation || 12103 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) || 12104 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) || 12105 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) || 12106 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst)) 12107 return true; 12108 12109 return false; 12110 } 12111 12112 static bool needs_scaling(const struct intel_plane_state *state) 12113 { 12114 int src_w = drm_rect_width(&state->uapi.src) >> 16; 12115 int src_h = drm_rect_height(&state->uapi.src) >> 16; 12116 int dst_w = drm_rect_width(&state->uapi.dst); 12117 int dst_h = drm_rect_height(&state->uapi.dst); 12118 12119 return (src_w != dst_w || src_h != dst_h); 12120 } 12121 12122 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, 12123 struct intel_crtc_state *crtc_state, 12124 const struct intel_plane_state *old_plane_state, 12125 struct intel_plane_state *plane_state) 12126 { 12127 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 12128 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 12129 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12130 bool mode_changed = needs_modeset(crtc_state); 12131 bool was_crtc_enabled = old_crtc_state->hw.active; 12132 bool is_crtc_enabled = crtc_state->hw.active; 12133 bool turn_off, turn_on, visible, was_visible; 12134 int ret; 12135 12136 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { 12137 ret = skl_update_scaler_plane(crtc_state, plane_state); 12138 if (ret) 12139 return ret; 12140 } 12141 12142 was_visible = 
old_plane_state->uapi.visible; 12143 visible = plane_state->uapi.visible; 12144 12145 if (!was_crtc_enabled && WARN_ON(was_visible)) 12146 was_visible = false; 12147 12148 /* 12149 * Visibility is calculated as if the crtc was on, but 12150 * after scaler setup everything depends on it being off 12151 * when the crtc isn't active. 12152 * 12153 * FIXME this is wrong for watermarks. Watermarks should also 12154 * be computed as if the pipe would be active. Perhaps move 12155 * per-plane wm computation to the .check_plane() hook, and 12156 * only combine the results from all planes in the current place? 12157 */ 12158 if (!is_crtc_enabled) { 12159 plane_state->uapi.visible = visible = false; 12160 crtc_state->active_planes &= ~BIT(plane->id); 12161 crtc_state->data_rate[plane->id] = 0; 12162 crtc_state->min_cdclk[plane->id] = 0; 12163 } 12164 12165 if (!was_visible && !visible) 12166 return 0; 12167 12168 turn_off = was_visible && (!visible || mode_changed); 12169 turn_on = visible && (!was_visible || mode_changed); 12170 12171 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", 12172 crtc->base.base.id, crtc->base.name, 12173 plane->base.base.id, plane->base.name, 12174 was_visible, visible, 12175 turn_off, turn_on, mode_changed); 12176 12177 if (turn_on) { 12178 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 12179 crtc_state->update_wm_pre = true; 12180 12181 /* must disable cxsr around plane enable/disable */ 12182 if (plane->id != PLANE_CURSOR) 12183 crtc_state->disable_cxsr = true; 12184 } else if (turn_off) { 12185 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 12186 crtc_state->update_wm_post = true; 12187 12188 /* must disable cxsr around plane enable/disable */ 12189 if (plane->id != PLANE_CURSOR) 12190 crtc_state->disable_cxsr = true; 12191 } else if (intel_wm_need_update(old_plane_state, plane_state)) { 12192 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { 12193 /* FIXME bollocks */ 12194 crtc_state->update_wm_pre = true; 12195 crtc_state->update_wm_post = true; 12196 } 12197 } 12198 12199 if (visible || was_visible) 12200 crtc_state->fb_bits |= plane->frontbuffer_bit; 12201 12202 /* 12203 * ILK/SNB DVSACNTR/Sprite Enable 12204 * IVB SPR_CTL/Sprite Enable 12205 * "When in Self Refresh Big FIFO mode, a write to enable the 12206 * plane will be internally buffered and delayed while Big FIFO 12207 * mode is exiting." 12208 * 12209 * Which means that enabling the sprite can take an extra frame 12210 * when we start in big FIFO mode (LP1+). Thus we need to drop 12211 * down to LP0 and wait for vblank in order to make sure the 12212 * sprite gets enabled on the next vblank after the register write. 12213 * Doing otherwise would risk enabling the sprite one frame after 12214 * we've already signalled flip completion. We can resume LP1+ 12215 * once the sprite has been enabled. 12216 * 12217 * 12218 * WaCxSRDisabledForSpriteScaling:ivb 12219 * IVB SPR_SCALE/Scaling Enable 12220 * "Low Power watermarks must be disabled for at least one 12221 * frame before enabling sprite scaling, and kept disabled 12222 * until sprite scaling is disabled." 12223 * 12224 * ILK/SNB DVSASCALE/Scaling Enable 12225 * "When in Self Refresh Big FIFO mode, scaling enable will be 12226 * masked off while Big FIFO mode is exiting." 12227 * 12228 * Despite the w/a only being listed for IVB we assume that 12229 * the ILK/SNB note has similar ramifications, hence we apply 12230 * the w/a on all three platforms. 
12231 * 12232 * Experimental results suggest this is needed also for the primary 12233 * plane, not only the sprite plane. 12234 */ 12235 if (plane->id != PLANE_CURSOR && 12236 (IS_GEN_RANGE(dev_priv, 5, 6) || 12237 IS_IVYBRIDGE(dev_priv)) && 12238 (turn_on || (!needs_scaling(old_plane_state) && 12239 needs_scaling(plane_state)))) 12240 crtc_state->disable_lp_wm = true; 12241 12242 return 0; 12243 } 12244 12245 static bool encoders_cloneable(const struct intel_encoder *a, 12246 const struct intel_encoder *b) 12247 { 12248 /* masks could be asymmetric, so check both ways */ 12249 return a == b || (a->cloneable & (1 << b->type) && 12250 b->cloneable & (1 << a->type)); 12251 } 12252 12253 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 12254 struct intel_crtc *crtc, 12255 struct intel_encoder *encoder) 12256 { 12257 struct intel_encoder *source_encoder; 12258 struct drm_connector *connector; 12259 struct drm_connector_state *connector_state; 12260 int i; 12261 12262 for_each_new_connector_in_state(state, connector, connector_state, i) { 12263 if (connector_state->crtc != &crtc->base) 12264 continue; 12265 12266 source_encoder = 12267 to_intel_encoder(connector_state->best_encoder); 12268 if (!encoders_cloneable(encoder, source_encoder)) 12269 return false; 12270 } 12271 12272 return true; 12273 } 12274 12275 static int icl_add_linked_planes(struct intel_atomic_state *state) 12276 { 12277 struct intel_plane *plane, *linked; 12278 struct intel_plane_state *plane_state, *linked_plane_state; 12279 int i; 12280 12281 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12282 linked = plane_state->planar_linked_plane; 12283 12284 if (!linked) 12285 continue; 12286 12287 linked_plane_state = intel_atomic_get_plane_state(state, linked); 12288 if (IS_ERR(linked_plane_state)) 12289 return PTR_ERR(linked_plane_state); 12290 12291 WARN_ON(linked_plane_state->planar_linked_plane != plane); 12292 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave); 12293 } 12294 12295 return 0; 12296 } 12297 12298 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) 12299 { 12300 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 12301 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12302 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 12303 struct intel_plane *plane, *linked; 12304 struct intel_plane_state *plane_state; 12305 int i; 12306 12307 if (INTEL_GEN(dev_priv) < 11) 12308 return 0; 12309 12310 /* 12311 * Destroy all old plane links and make the slave plane invisible 12312 * in the crtc_state->active_planes mask.
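 * Fresh links are then re-established below for every plane that still
 * scans out an NV12 framebuffer and thus needs a Y plane.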
12313 */ 12314 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12315 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 12316 continue; 12317 12318 plane_state->planar_linked_plane = NULL; 12319 if (plane_state->planar_slave && !plane_state->uapi.visible) { 12320 crtc_state->active_planes &= ~BIT(plane->id); 12321 crtc_state->update_planes |= BIT(plane->id); 12322 } 12323 12324 plane_state->planar_slave = false; 12325 } 12326 12327 if (!crtc_state->nv12_planes) 12328 return 0; 12329 12330 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12331 struct intel_plane_state *linked_state = NULL; 12332 12333 if (plane->pipe != crtc->pipe || 12334 !(crtc_state->nv12_planes & BIT(plane->id))) 12335 continue; 12336 12337 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 12338 if (!icl_is_nv12_y_plane(linked->id)) 12339 continue; 12340 12341 if (crtc_state->active_planes & BIT(linked->id)) 12342 continue; 12343 12344 linked_state = intel_atomic_get_plane_state(state, linked); 12345 if (IS_ERR(linked_state)) 12346 return PTR_ERR(linked_state); 12347 12348 break; 12349 } 12350 12351 if (!linked_state) { 12352 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n", 12353 hweight8(crtc_state->nv12_planes)); 12354 12355 return -EINVAL; 12356 } 12357 12358 plane_state->planar_linked_plane = linked; 12359 12360 linked_state->planar_slave = true; 12361 linked_state->planar_linked_plane = plane; 12362 crtc_state->active_planes |= BIT(linked->id); 12363 crtc_state->update_planes |= BIT(linked->id); 12364 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); 12365 12366 /* Copy parameters to slave plane */ 12367 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 12368 linked_state->color_ctl = plane_state->color_ctl; 12369 linked_state->view = plane_state->view; 12370 memcpy(linked_state->color_plane, plane_state->color_plane, 12371 sizeof(linked_state->color_plane)); 12372 12373 intel_plane_copy_uapi_to_hw_state(linked_state, plane_state); 12374 linked_state->uapi.src = plane_state->uapi.src; 12375 linked_state->uapi.dst = plane_state->uapi.dst; 12376 12377 if (icl_is_hdr_plane(dev_priv, plane->id)) { 12378 if (linked->id == PLANE_SPRITE5) 12379 plane_state->cus_ctl |= PLANE_CUS_PLANE_7; 12380 else if (linked->id == PLANE_SPRITE4) 12381 plane_state->cus_ctl |= PLANE_CUS_PLANE_6; 12382 else 12383 MISSING_CASE(linked->id); 12384 } 12385 } 12386 12387 return 0; 12388 } 12389 12390 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 12391 { 12392 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 12393 struct intel_atomic_state *state = 12394 to_intel_atomic_state(new_crtc_state->uapi.state); 12395 const struct intel_crtc_state *old_crtc_state = 12396 intel_atomic_get_old_crtc_state(state, crtc); 12397 12398 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 12399 } 12400 12401 static bool 12402 intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state) 12403 { 12404 struct drm_crtc *crtc = crtc_state->uapi.crtc; 12405 struct drm_atomic_state *state = crtc_state->uapi.state; 12406 struct drm_connector *connector; 12407 struct drm_connector_state *connector_state; 12408 int i; 12409 12410 for_each_new_connector_in_state(state, connector, connector_state, i) { 12411 if (connector_state->crtc != crtc) 12412 continue; 12413 if (connector->has_tile && 12414 connector->tile_h_loc == connector->num_h_tile - 1 && 12415 connector->tile_v_loc == 
connector->num_v_tile - 1) 12416 return true; 12417 } 12418 12419 return false; 12420 } 12421 12422 static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state) 12423 { 12424 crtc_state->master_transcoder = INVALID_TRANSCODER; 12425 crtc_state->sync_mode_slaves_mask = 0; 12426 } 12427 12428 static int icl_compute_port_sync_crtc_state(struct drm_connector *connector, 12429 struct intel_crtc_state *crtc_state, 12430 int num_tiled_conns) 12431 { 12432 struct drm_crtc *crtc = crtc_state->uapi.crtc; 12433 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 12434 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 12435 struct drm_connector *master_connector; 12436 struct drm_connector_list_iter conn_iter; 12437 struct drm_crtc *master_crtc = NULL; 12438 struct drm_crtc_state *master_crtc_state; 12439 struct intel_crtc_state *master_pipe_config; 12440 12441 if (INTEL_GEN(dev_priv) < 11) 12442 return 0; 12443 12444 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP)) 12445 return 0; 12446 12447 /* 12448 * In case of tiled displays there could be one or more slaves but there is 12449 * only one master. Let's make the CRTC used by the connector corresponding 12450 * to the last horizontal and last vertical tile a master/genlock CRTC. 12451 * All the other CRTCs corresponding to other tiles of the same Tile group 12452 * are the slave CRTCs and hold a pointer to their genlock CRTC. 12453 * If not all tiles are present, do not make master/slave assignments. 12454 */ 12455 if (!connector->has_tile || 12456 crtc_state->hw.mode.hdisplay != connector->tile_h_size || 12457 crtc_state->hw.mode.vdisplay != connector->tile_v_size || 12458 num_tiled_conns < connector->num_h_tile * connector->num_v_tile) { 12459 reset_port_sync_mode_state(crtc_state); 12460 return 0; 12461 } 12462 /* The last horizontal and last vertical tile connector is the master; 12463 * the master's crtc state is already populated in the slaves for port sync. 12464 */ 12465 if (connector->tile_h_loc == connector->num_h_tile - 1 && 12466 connector->tile_v_loc == connector->num_v_tile - 1) 12467 return 0; 12468 12469 /* Loop through all connectors and configure the Slave crtc_state 12470 * to point to the correct master.
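 * A connector qualifies as the master when it belongs to the same tile
 * group and sits at the last horizontal and last vertical tile location.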
12471 */ 12472 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 12473 drm_for_each_connector_iter(master_connector, &conn_iter) { 12474 struct drm_connector_state *master_conn_state = NULL; 12475 12476 if (!(master_connector->has_tile && 12477 master_connector->tile_group->id == connector->tile_group->id)) 12478 continue; 12479 if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 || 12480 master_connector->tile_v_loc != master_connector->num_v_tile - 1) 12481 continue; 12482 12483 master_conn_state = drm_atomic_get_connector_state(&state->base, 12484 master_connector); 12485 if (IS_ERR(master_conn_state)) { 12486 drm_connector_list_iter_end(&conn_iter); 12487 return PTR_ERR(master_conn_state); 12488 } 12489 if (master_conn_state->crtc) { 12490 master_crtc = master_conn_state->crtc; 12491 break; 12492 } 12493 } 12494 drm_connector_list_iter_end(&conn_iter); 12495 12496 if (!master_crtc) { 12497 DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n", 12498 crtc->base.id); 12499 return -EINVAL; 12500 } 12501 12502 master_crtc_state = drm_atomic_get_crtc_state(&state->base, 12503 master_crtc); 12504 if (IS_ERR(master_crtc_state)) 12505 return PTR_ERR(master_crtc_state); 12506 12507 master_pipe_config = to_intel_crtc_state(master_crtc_state); 12508 crtc_state->master_transcoder = master_pipe_config->cpu_transcoder; 12509 master_pipe_config->sync_mode_slaves_mask |= 12510 BIT(crtc_state->cpu_transcoder); 12511 DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n", 12512 transcoder_name(crtc_state->master_transcoder), 12513 crtc->base.id, 12514 master_pipe_config->sync_mode_slaves_mask); 12515 12516 return 0; 12517 } 12518 12519 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 12520 struct intel_crtc *crtc) 12521 { 12522 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12523 struct intel_crtc_state *crtc_state = 12524 intel_atomic_get_new_crtc_state(state, crtc); 12525 bool mode_changed = needs_modeset(crtc_state); 12526 int ret; 12527 12528 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && 12529 mode_changed && !crtc_state->hw.active) 12530 crtc_state->update_wm_post = true; 12531 12532 if (mode_changed && crtc_state->hw.enable && 12533 dev_priv->display.crtc_compute_clock && 12534 !WARN_ON(crtc_state->shared_dpll)) { 12535 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); 12536 if (ret) 12537 return ret; 12538 } 12539 12540 /* 12541 * May need to update pipe gamma enable bits 12542 * when C8 planes are getting enabled/disabled. 12543 */ 12544 if (c8_planes_changed(crtc_state)) 12545 crtc_state->uapi.color_mgmt_changed = true; 12546 12547 if (mode_changed || crtc_state->update_pipe || 12548 crtc_state->uapi.color_mgmt_changed) { 12549 ret = intel_color_check(crtc_state); 12550 if (ret) 12551 return ret; 12552 } 12553 12554 ret = 0; 12555 if (dev_priv->display.compute_pipe_wm) { 12556 ret = dev_priv->display.compute_pipe_wm(crtc_state); 12557 if (ret) { 12558 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); 12559 return ret; 12560 } 12561 } 12562 12563 if (dev_priv->display.compute_intermediate_wm) { 12564 if (WARN_ON(!dev_priv->display.compute_pipe_wm)) 12565 return 0; 12566 12567 /* 12568 * Calculate 'intermediate' watermarks that satisfy both the 12569 * old state and the new state. We can program these 12570 * immediately. 
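 * (Loosely, each intermediate watermark level ends up as the maximum of
 * the old and the new value, so it is safe under either configuration.)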
12571 */ 12572 ret = dev_priv->display.compute_intermediate_wm(crtc_state); 12573 if (ret) { 12574 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 12575 return ret; 12576 } 12577 } 12578 12579 if (INTEL_GEN(dev_priv) >= 9) { 12580 if (mode_changed || crtc_state->update_pipe) 12581 ret = skl_update_scaler_crtc(crtc_state); 12582 if (!ret) 12583 ret = intel_atomic_setup_scalers(dev_priv, crtc, 12584 crtc_state); 12585 } 12586 12587 if (HAS_IPS(dev_priv)) 12588 crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state); 12589 12590 return ret; 12591 } 12592 12593 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 12594 { 12595 struct intel_connector *connector; 12596 struct drm_connector_list_iter conn_iter; 12597 12598 drm_connector_list_iter_begin(dev, &conn_iter); 12599 for_each_intel_connector_iter(connector, &conn_iter) { 12600 if (connector->base.state->crtc) 12601 drm_connector_put(&connector->base); 12602 12603 if (connector->base.encoder) { 12604 connector->base.state->best_encoder = 12605 connector->base.encoder; 12606 connector->base.state->crtc = 12607 connector->base.encoder->crtc; 12608 12609 drm_connector_get(&connector->base); 12610 } else { 12611 connector->base.state->best_encoder = NULL; 12612 connector->base.state->crtc = NULL; 12613 } 12614 } 12615 drm_connector_list_iter_end(&conn_iter); 12616 } 12617 12618 static int 12619 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 12620 struct intel_crtc_state *pipe_config) 12621 { 12622 struct drm_connector *connector = conn_state->connector; 12623 const struct drm_display_info *info = &connector->display_info; 12624 int bpp; 12625 12626 switch (conn_state->max_bpc) { 12627 case 6 ... 7: 12628 bpp = 6 * 3; 12629 break; 12630 case 8 ... 9: 12631 bpp = 8 * 3; 12632 break; 12633 case 10 ... 
11: 12634 bpp = 10 * 3; 12635 break; 12636 case 12: 12637 bpp = 12 * 3; 12638 break; 12639 default: 12640 return -EINVAL; 12641 } 12642 12643 if (bpp < pipe_config->pipe_bpp) { 12644 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " 12645 "EDID bpp %d, requested bpp %d, max platform bpp %d\n", 12646 connector->base.id, connector->name, 12647 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc, 12648 pipe_config->pipe_bpp); 12649 12650 pipe_config->pipe_bpp = bpp; 12651 } 12652 12653 return 0; 12654 } 12655 12656 static int 12657 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 12658 struct intel_crtc_state *pipe_config) 12659 { 12660 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12661 struct drm_atomic_state *state = pipe_config->uapi.state; 12662 struct drm_connector *connector; 12663 struct drm_connector_state *connector_state; 12664 int bpp, i; 12665 12666 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 12667 IS_CHERRYVIEW(dev_priv))) 12668 bpp = 10*3; 12669 else if (INTEL_GEN(dev_priv) >= 5) 12670 bpp = 12*3; 12671 else 12672 bpp = 8*3; 12673 12674 pipe_config->pipe_bpp = bpp; 12675 12676 /* Clamp display bpp to connector max bpp */ 12677 for_each_new_connector_in_state(state, connector, connector_state, i) { 12678 int ret; 12679 12680 if (connector_state->crtc != &crtc->base) 12681 continue; 12682 12683 ret = compute_sink_pipe_bpp(connector_state, pipe_config); 12684 if (ret) 12685 return ret; 12686 } 12687 12688 return 0; 12689 } 12690 12691 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 12692 { 12693 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 12694 "type: 0x%x flags: 0x%x\n", 12695 mode->crtc_clock, 12696 mode->crtc_hdisplay, mode->crtc_hsync_start, 12697 mode->crtc_hsync_end, mode->crtc_htotal, 12698 mode->crtc_vdisplay, mode->crtc_vsync_start, 12699 mode->crtc_vsync_end, mode->crtc_vtotal, 12700 mode->type, mode->flags); 12701 } 12702 12703 static inline void 12704 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, 12705 const char *id, unsigned int lane_count, 12706 const struct intel_link_m_n *m_n) 12707 { 12708 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 12709 id, lane_count, 12710 m_n->gmch_m, m_n->gmch_n, 12711 m_n->link_m, m_n->link_n, m_n->tu); 12712 } 12713 12714 static void 12715 intel_dump_infoframe(struct drm_i915_private *dev_priv, 12716 const union hdmi_infoframe *frame) 12717 { 12718 if (!drm_debug_enabled(DRM_UT_KMS)) 12719 return; 12720 12721 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); 12722 } 12723 12724 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x 12725 12726 static const char * const output_type_str[] = { 12727 OUTPUT_TYPE(UNUSED), 12728 OUTPUT_TYPE(ANALOG), 12729 OUTPUT_TYPE(DVO), 12730 OUTPUT_TYPE(SDVO), 12731 OUTPUT_TYPE(LVDS), 12732 OUTPUT_TYPE(TVOUT), 12733 OUTPUT_TYPE(HDMI), 12734 OUTPUT_TYPE(DP), 12735 OUTPUT_TYPE(EDP), 12736 OUTPUT_TYPE(DSI), 12737 OUTPUT_TYPE(DDI), 12738 OUTPUT_TYPE(DP_MST), 12739 }; 12740 12741 #undef OUTPUT_TYPE 12742 12743 static void snprintf_output_types(char *buf, size_t len, 12744 unsigned int output_types) 12745 { 12746 char *str = buf; 12747 int i; 12748 12749 str[0] = '\0'; 12750 12751 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) { 12752 int r; 12753 12754 if ((output_types & BIT(i)) == 0) 12755 continue; 12756 12757 r = snprintf(str, len, "%s%s", 12758 str != buf ? 
"," : "", output_type_str[i]); 12759 if (r >= len) 12760 break; 12761 str += r; 12762 len -= r; 12763 12764 output_types &= ~BIT(i); 12765 } 12766 12767 WARN_ON_ONCE(output_types != 0); 12768 } 12769 12770 static const char * const output_format_str[] = { 12771 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", 12772 [INTEL_OUTPUT_FORMAT_RGB] = "RGB", 12773 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", 12774 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", 12775 }; 12776 12777 static const char *output_formats(enum intel_output_format format) 12778 { 12779 if (format >= ARRAY_SIZE(output_format_str)) 12780 format = INTEL_OUTPUT_FORMAT_INVALID; 12781 return output_format_str[format]; 12782 } 12783 12784 static void intel_dump_plane_state(const struct intel_plane_state *plane_state) 12785 { 12786 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 12787 const struct drm_framebuffer *fb = plane_state->hw.fb; 12788 struct drm_format_name_buf format_name; 12789 12790 if (!fb) { 12791 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n", 12792 plane->base.base.id, plane->base.name, 12793 yesno(plane_state->uapi.visible)); 12794 return; 12795 } 12796 12797 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n", 12798 plane->base.base.id, plane->base.name, 12799 fb->base.id, fb->width, fb->height, 12800 drm_get_format_name(fb->format->format, &format_name), 12801 yesno(plane_state->uapi.visible)); 12802 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n", 12803 plane_state->hw.rotation, plane_state->scaler_id); 12804 if (plane_state->uapi.visible) 12805 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", 12806 DRM_RECT_FP_ARG(&plane_state->uapi.src), 12807 DRM_RECT_ARG(&plane_state->uapi.dst)); 12808 } 12809 12810 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, 12811 struct intel_atomic_state *state, 12812 const char *context) 12813 { 12814 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 12815 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12816 const struct intel_plane_state *plane_state; 12817 struct intel_plane *plane; 12818 char buf[64]; 12819 int i; 12820 12821 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n", 12822 crtc->base.base.id, crtc->base.name, 12823 yesno(pipe_config->hw.enable), context); 12824 12825 if (!pipe_config->hw.enable) 12826 goto dump_planes; 12827 12828 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); 12829 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n", 12830 yesno(pipe_config->hw.active), 12831 buf, pipe_config->output_types, 12832 output_formats(pipe_config->output_format)); 12833 12834 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 12835 transcoder_name(pipe_config->cpu_transcoder), 12836 pipe_config->pipe_bpp, pipe_config->dither); 12837 12838 if (pipe_config->has_pch_encoder) 12839 intel_dump_m_n_config(pipe_config, "fdi", 12840 pipe_config->fdi_lanes, 12841 &pipe_config->fdi_m_n); 12842 12843 if (intel_crtc_has_dp_encoder(pipe_config)) { 12844 intel_dump_m_n_config(pipe_config, "dp m_n", 12845 pipe_config->lane_count, &pipe_config->dp_m_n); 12846 if (pipe_config->has_drrs) 12847 intel_dump_m_n_config(pipe_config, "dp m2_n2", 12848 pipe_config->lane_count, 12849 &pipe_config->dp_m2_n2); 12850 } 12851 12852 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", 12853 pipe_config->has_audio, pipe_config->has_infoframe, 12854 pipe_config->infoframes.enable); 12855 12856 if 
(pipe_config->infoframes.enable & 12857 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) 12858 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp); 12859 if (pipe_config->infoframes.enable & 12860 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) 12861 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); 12862 if (pipe_config->infoframes.enable & 12863 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) 12864 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd); 12865 if (pipe_config->infoframes.enable & 12866 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) 12867 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); 12868 12869 DRM_DEBUG_KMS("requested mode:\n"); 12870 drm_mode_debug_printmodeline(&pipe_config->hw.mode); 12871 DRM_DEBUG_KMS("adjusted mode:\n"); 12872 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode); 12873 intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode); 12874 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n", 12875 pipe_config->port_clock, 12876 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 12877 pipe_config->pixel_rate); 12878 12879 if (INTEL_GEN(dev_priv) >= 9) 12880 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 12881 crtc->num_scalers, 12882 pipe_config->scaler_state.scaler_users, 12883 pipe_config->scaler_state.scaler_id); 12884 12885 if (HAS_GMCH(dev_priv)) 12886 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 12887 pipe_config->gmch_pfit.control, 12888 pipe_config->gmch_pfit.pgm_ratios, 12889 pipe_config->gmch_pfit.lvds_border_bits); 12890 else 12891 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n", 12892 pipe_config->pch_pfit.pos, 12893 pipe_config->pch_pfit.size, 12894 enableddisabled(pipe_config->pch_pfit.enabled), 12895 yesno(pipe_config->pch_pfit.force_thru)); 12896 12897 DRM_DEBUG_KMS("ips: %i, double wide: %i\n", 12898 pipe_config->ips_enabled, pipe_config->double_wide); 12899 12900 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); 12901 12902 if (IS_CHERRYVIEW(dev_priv)) 12903 DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 12904 pipe_config->cgm_mode, pipe_config->gamma_mode, 12905 pipe_config->gamma_enable, pipe_config->csc_enable); 12906 else 12907 DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 12908 pipe_config->csc_mode, pipe_config->gamma_mode, 12909 pipe_config->gamma_enable, pipe_config->csc_enable); 12910 12911 DRM_DEBUG_KMS("MST master transcoder: %s\n", 12912 transcoder_name(pipe_config->mst_master_transcoder)); 12913 12914 dump_planes: 12915 if (!state) 12916 return; 12917 12918 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12919 if (plane->pipe == crtc->pipe) 12920 intel_dump_plane_state(plane_state); 12921 } 12922 } 12923 12924 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 12925 { 12926 struct drm_device *dev = state->base.dev; 12927 struct drm_connector *connector; 12928 struct drm_connector_list_iter conn_iter; 12929 unsigned int used_ports = 0; 12930 unsigned int used_mst_ports = 0; 12931 bool ret = true; 12932 12933 /* 12934 * We're going to peek into connector->state, 12935 * hence connection_mutex must be held. 
12936 */ 12937 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 12938 12939 /* 12940 * Walk the connector list instead of the encoder 12941 * list to detect the problem on ddi platforms 12942 * where there's just one encoder per digital port. 12943 */ 12944 drm_connector_list_iter_begin(dev, &conn_iter); 12945 drm_for_each_connector_iter(connector, &conn_iter) { 12946 struct drm_connector_state *connector_state; 12947 struct intel_encoder *encoder; 12948 12949 connector_state = 12950 drm_atomic_get_new_connector_state(&state->base, 12951 connector); 12952 if (!connector_state) 12953 connector_state = connector->state; 12954 12955 if (!connector_state->best_encoder) 12956 continue; 12957 12958 encoder = to_intel_encoder(connector_state->best_encoder); 12959 12960 WARN_ON(!connector_state->crtc); 12961 12962 switch (encoder->type) { 12963 unsigned int port_mask; 12964 case INTEL_OUTPUT_DDI: 12965 if (WARN_ON(!HAS_DDI(to_i915(dev)))) 12966 break; 12967 /* else, fall through */ 12968 case INTEL_OUTPUT_DP: 12969 case INTEL_OUTPUT_HDMI: 12970 case INTEL_OUTPUT_EDP: 12971 port_mask = 1 << encoder->port; 12972 12973 /* the same port mustn't appear more than once */ 12974 if (used_ports & port_mask) 12975 ret = false; 12976 12977 used_ports |= port_mask; 12978 break; 12979 case INTEL_OUTPUT_DP_MST: 12980 used_mst_ports |= 12981 1 << encoder->port; 12982 break; 12983 default: 12984 break; 12985 } 12986 } 12987 drm_connector_list_iter_end(&conn_iter); 12988 12989 /* can't mix MST and SST/HDMI on the same port */ 12990 if (used_ports & used_mst_ports) 12991 return false; 12992 12993 return ret; 12994 } 12995 12996 static void 12997 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state) 12998 { 12999 intel_crtc_copy_color_blobs(crtc_state); 13000 } 13001 13002 static void 13003 intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state) 13004 { 13005 crtc_state->hw.enable = crtc_state->uapi.enable; 13006 crtc_state->hw.active = crtc_state->uapi.active; 13007 crtc_state->hw.mode = crtc_state->uapi.mode; 13008 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode; 13009 intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state); 13010 } 13011 13012 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state) 13013 { 13014 crtc_state->uapi.enable = crtc_state->hw.enable; 13015 crtc_state->uapi.active = crtc_state->hw.active; 13016 WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0); 13017 13018 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode; 13019 13020 /* copy color blobs to uapi */ 13021 drm_property_replace_blob(&crtc_state->uapi.degamma_lut, 13022 crtc_state->hw.degamma_lut); 13023 drm_property_replace_blob(&crtc_state->uapi.gamma_lut, 13024 crtc_state->hw.gamma_lut); 13025 drm_property_replace_blob(&crtc_state->uapi.ctm, 13026 crtc_state->hw.ctm); 13027 } 13028 13029 static int 13030 intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state) 13031 { 13032 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 13033 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13034 struct intel_crtc_state *saved_state; 13035 13036 saved_state = intel_crtc_state_alloc(crtc); 13037 if (!saved_state) 13038 return -ENOMEM; 13039 13040 /* free the old crtc_state->hw members */ 13041 intel_crtc_free_hw_state(crtc_state); 13042 13043 /* FIXME: before the switch to atomic started, a new pipe_config was 13044 * kzalloc'd. 
Code that depends on any field being zero should be 13045 * fixed, so that the crtc_state can be safely duplicated. For now, 13046 * only fields that are known to not cause problems are preserved. */ 13047 13048 saved_state->uapi = crtc_state->uapi; 13049 saved_state->scaler_state = crtc_state->scaler_state; 13050 saved_state->shared_dpll = crtc_state->shared_dpll; 13051 saved_state->dpll_hw_state = crtc_state->dpll_hw_state; 13052 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, 13053 sizeof(saved_state->icl_port_dplls)); 13054 saved_state->crc_enabled = crtc_state->crc_enabled; 13055 if (IS_G4X(dev_priv) || 13056 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13057 saved_state->wm = crtc_state->wm; 13058 /* 13059 * Save the slave bitmask, which gets filled in the master crtc state during 13060 * the slave atomic check call. For all other CRTCs reset the port sync 13061 * variables; crtc_state->master_transcoder needs to be set to INVALID. 13062 */ 13063 reset_port_sync_mode_state(saved_state); 13064 if (intel_atomic_is_master_connector(crtc_state)) 13065 saved_state->sync_mode_slaves_mask = 13066 crtc_state->sync_mode_slaves_mask; 13067 13068 memcpy(crtc_state, saved_state, sizeof(*crtc_state)); 13069 kfree(saved_state); 13070 13071 intel_crtc_copy_uapi_to_hw_state(crtc_state); 13072 13073 return 0; 13074 } 13075 13076 static int 13077 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) 13078 { 13079 struct drm_crtc *crtc = pipe_config->uapi.crtc; 13080 struct drm_atomic_state *state = pipe_config->uapi.state; 13081 struct intel_encoder *encoder; 13082 struct drm_connector *connector; 13083 struct drm_connector_state *connector_state; 13084 int base_bpp, ret; 13085 int i, tile_group_id = -1, num_tiled_conns = 0; 13086 bool retry = true; 13087 13088 pipe_config->cpu_transcoder = 13089 (enum transcoder) to_intel_crtc(crtc)->pipe; 13090 13091 /* 13092 * Sanitize sync polarity flags based on requested ones. If neither 13093 * positive nor negative polarity is requested, treat this as meaning 13094 * negative polarity. 13095 */ 13096 if (!(pipe_config->hw.adjusted_mode.flags & 13097 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 13098 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 13099 13100 if (!(pipe_config->hw.adjusted_mode.flags & 13101 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 13102 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 13103 13104 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc), 13105 pipe_config); 13106 if (ret) 13107 return ret; 13108 13109 base_bpp = pipe_config->pipe_bpp; 13110 13111 /* 13112 * Determine the real pipe dimensions. Note that stereo modes can 13113 * increase the actual pipe size due to the frame doubling and 13114 * insertion of additional space for blanks between the frames. This 13115 * is stored in the crtc timings. We use the requested mode to do this 13116 * computation to clearly distinguish it from the adjusted mode, which 13117 * can be changed by the connectors in the below retry loop.
13118 */ 13119 drm_mode_get_hv_timing(&pipe_config->hw.mode, 13120 &pipe_config->pipe_src_w, 13121 &pipe_config->pipe_src_h); 13122 13123 for_each_new_connector_in_state(state, connector, connector_state, i) { 13124 if (connector_state->crtc != crtc) 13125 continue; 13126 13127 encoder = to_intel_encoder(connector_state->best_encoder); 13128 13129 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { 13130 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 13131 return -EINVAL; 13132 } 13133 13134 /* 13135 * Determine output_types before calling the .compute_config() 13136 * hooks so that the hooks can use this information safely. 13137 */ 13138 if (encoder->compute_output_type) 13139 pipe_config->output_types |= 13140 BIT(encoder->compute_output_type(encoder, pipe_config, 13141 connector_state)); 13142 else 13143 pipe_config->output_types |= BIT(encoder->type); 13144 } 13145 13146 encoder_retry: 13147 /* Ensure the port clock defaults are reset when retrying. */ 13148 pipe_config->port_clock = 0; 13149 pipe_config->pixel_multiplier = 1; 13150 13151 /* Fill in default crtc timings, allow encoders to overwrite them. */ 13152 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode, 13153 CRTC_STEREO_DOUBLE); 13154 13155 /* Get tile_group_id of tiled connector */ 13156 for_each_new_connector_in_state(state, connector, connector_state, i) { 13157 if (connector_state->crtc == crtc && 13158 connector->has_tile) { 13159 tile_group_id = connector->tile_group->id; 13160 break; 13161 } 13162 } 13163 13164 /* Get total number of tiled connectors in state that belong to 13165 * this tile group. 13166 */ 13167 for_each_new_connector_in_state(state, connector, connector_state, i) { 13168 if (connector->has_tile && 13169 connector->tile_group->id == tile_group_id) 13170 num_tiled_conns++; 13171 } 13172 13173 /* Pass our mode to the connectors and the CRTC to give them a chance to 13174 * adjust it according to limitations or connector properties, and also 13175 * a chance to reject the mode entirely. 13176 */ 13177 for_each_new_connector_in_state(state, connector, connector_state, i) { 13178 if (connector_state->crtc != crtc) 13179 continue; 13180 13181 ret = icl_compute_port_sync_crtc_state(connector, pipe_config, 13182 num_tiled_conns); 13183 if (ret) { 13184 DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n", 13185 ret); 13186 return ret; 13187 } 13188 13189 encoder = to_intel_encoder(connector_state->best_encoder); 13190 ret = encoder->compute_config(encoder, pipe_config, 13191 connector_state); 13192 if (ret < 0) { 13193 if (ret != -EDEADLK) 13194 DRM_DEBUG_KMS("Encoder config failure: %d\n", 13195 ret); 13196 return ret; 13197 } 13198 } 13199 13200 /* Set default port clock if not overwritten by the encoder. Needs to be 13201 * done afterwards in case the encoder adjusts the mode. 
*/ 13202 if (!pipe_config->port_clock) 13203 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock 13204 * pipe_config->pixel_multiplier; 13205 13206 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 13207 if (ret == -EDEADLK) 13208 return ret; 13209 if (ret < 0) { 13210 DRM_DEBUG_KMS("CRTC fixup failed\n"); 13211 return ret; 13212 } 13213 13214 if (ret == RETRY) { 13215 if (WARN(!retry, "loop in pipe configuration computation\n")) 13216 return -EINVAL; 13217 13218 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); 13219 retry = false; 13220 goto encoder_retry; 13221 } 13222 13223 /* Dithering seems not to pass the bits through correctly when it should, so 13224 * only enable it on 6bpc panels and when it's not a compliance 13225 * test requesting 6bpc video pattern. 13226 */ 13227 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) && 13228 !pipe_config->dither_force_disable; 13229 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 13230 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 13231 13232 /* 13233 * Make drm_calc_timestamping_constants in 13234 * drm_atomic_helper_update_legacy_modeset_state() happy 13235 */ 13236 pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode; 13237 13238 return 0; 13239 } 13240 13241 bool intel_fuzzy_clock_check(int clock1, int clock2) 13242 { 13243 int diff; 13244 13245 if (clock1 == clock2) 13246 return true; 13247 13248 if (!clock1 || !clock2) 13249 return false; 13250 13251 diff = abs(clock1 - clock2); 13252 13253 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105) 13254 return true; 13255 13256 return false; 13257 } 13258 13259 static bool 13260 intel_compare_m_n(unsigned int m, unsigned int n, 13261 unsigned int m2, unsigned int n2, 13262 bool exact) 13263 { 13264 if (m == m2 && n == n2) 13265 return true; 13266 13267 if (exact || !m || !n || !m2 || !n2) 13268 return false; 13269 13270 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 13271 13272 if (n > n2) { 13273 while (n > n2) { 13274 m2 <<= 1; 13275 n2 <<= 1; 13276 } 13277 } else if (n < n2) { 13278 while (n < n2) { 13279 m <<= 1; 13280 n <<= 1; 13281 } 13282 } 13283 13284 if (n != n2) 13285 return false; 13286 13287 return intel_fuzzy_clock_check(m, m2); 13288 } 13289 13290 static bool 13291 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 13292 const struct intel_link_m_n *m2_n2, 13293 bool exact) 13294 { 13295 return m_n->tu == m2_n2->tu && 13296 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 13297 m2_n2->gmch_m, m2_n2->gmch_n, exact) && 13298 intel_compare_m_n(m_n->link_m, m_n->link_n, 13299 m2_n2->link_m, m2_n2->link_n, exact); 13300 } 13301 13302 static bool 13303 intel_compare_infoframe(const union hdmi_infoframe *a, 13304 const union hdmi_infoframe *b) 13305 { 13306 return memcmp(a, b, sizeof(*a)) == 0; 13307 } 13308 13309 static void 13310 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, 13311 bool fastset, const char *name, 13312 const union hdmi_infoframe *a, 13313 const union hdmi_infoframe *b) 13314 { 13315 if (fastset) { 13316 if (!drm_debug_enabled(DRM_UT_KMS)) 13317 return; 13318 13319 DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name); 13320 DRM_DEBUG_KMS("expected:\n"); 13321 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); 13322 DRM_DEBUG_KMS("found:\n"); 13323 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); 13324 } else { 13325 DRM_ERROR("mismatch in %s infoframe\n", name); 13326 DRM_ERROR("expected:\n"); 13327 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); 13328 
DRM_ERROR("found:\n"); 13329 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); 13330 } 13331 } 13332 13333 static void __printf(4, 5) 13334 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, 13335 const char *name, const char *format, ...) 13336 { 13337 struct va_format vaf; 13338 va_list args; 13339 13340 va_start(args, format); 13341 vaf.fmt = format; 13342 vaf.va = &args; 13343 13344 if (fastset) 13345 DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n", 13346 crtc->base.base.id, crtc->base.name, name, &vaf); 13347 else 13348 DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n", 13349 crtc->base.base.id, crtc->base.name, name, &vaf); 13350 13351 va_end(args); 13352 } 13353 13354 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 13355 { 13356 if (i915_modparams.fastboot != -1) 13357 return i915_modparams.fastboot; 13358 13359 /* Enable fastboot by default on Skylake and newer */ 13360 if (INTEL_GEN(dev_priv) >= 9) 13361 return true; 13362 13363 /* Enable fastboot by default on VLV and CHV */ 13364 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13365 return true; 13366 13367 /* Disabled by default on all others */ 13368 return false; 13369 } 13370 13371 static bool 13372 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 13373 const struct intel_crtc_state *pipe_config, 13374 bool fastset) 13375 { 13376 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); 13377 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 13378 bool ret = true; 13379 u32 bp_gamma = 0; 13380 bool fixup_inherited = fastset && 13381 (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) && 13382 !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED); 13383 13384 if (fixup_inherited && !fastboot_enabled(dev_priv)) { 13385 DRM_DEBUG_KMS("initial modeset and fastboot not set\n"); 13386 ret = false; 13387 } 13388 13389 #define PIPE_CONF_CHECK_X(name) do { \ 13390 if (current_config->name != pipe_config->name) { \ 13391 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13392 "(expected 0x%08x, found 0x%08x)", \ 13393 current_config->name, \ 13394 pipe_config->name); \ 13395 ret = false; \ 13396 } \ 13397 } while (0) 13398 13399 #define PIPE_CONF_CHECK_I(name) do { \ 13400 if (current_config->name != pipe_config->name) { \ 13401 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13402 "(expected %i, found %i)", \ 13403 current_config->name, \ 13404 pipe_config->name); \ 13405 ret = false; \ 13406 } \ 13407 } while (0) 13408 13409 #define PIPE_CONF_CHECK_BOOL(name) do { \ 13410 if (current_config->name != pipe_config->name) { \ 13411 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13412 "(expected %s, found %s)", \ 13413 yesno(current_config->name), \ 13414 yesno(pipe_config->name)); \ 13415 ret = false; \ 13416 } \ 13417 } while (0) 13418 13419 /* 13420 * Checks state where we only read out the enabling, but not the entire 13421 * state itself (like full infoframes or ELD for audio). These states 13422 * require a full modeset on bootup to fix up. 
13423 */ 13424 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \ 13425 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ 13426 PIPE_CONF_CHECK_BOOL(name); \ 13427 } else { \ 13428 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13429 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \ 13430 yesno(current_config->name), \ 13431 yesno(pipe_config->name)); \ 13432 ret = false; \ 13433 } \ 13434 } while (0) 13435 13436 #define PIPE_CONF_CHECK_P(name) do { \ 13437 if (current_config->name != pipe_config->name) { \ 13438 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13439 "(expected %p, found %p)", \ 13440 current_config->name, \ 13441 pipe_config->name); \ 13442 ret = false; \ 13443 } \ 13444 } while (0) 13445 13446 #define PIPE_CONF_CHECK_M_N(name) do { \ 13447 if (!intel_compare_link_m_n(&current_config->name, \ 13448 &pipe_config->name,\ 13449 !fastset)) { \ 13450 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13451 "(expected tu %i gmch %i/%i link %i/%i, " \ 13452 "found tu %i, gmch %i/%i link %i/%i)", \ 13453 current_config->name.tu, \ 13454 current_config->name.gmch_m, \ 13455 current_config->name.gmch_n, \ 13456 current_config->name.link_m, \ 13457 current_config->name.link_n, \ 13458 pipe_config->name.tu, \ 13459 pipe_config->name.gmch_m, \ 13460 pipe_config->name.gmch_n, \ 13461 pipe_config->name.link_m, \ 13462 pipe_config->name.link_n); \ 13463 ret = false; \ 13464 } \ 13465 } while (0) 13466 13467 /* This is required for BDW+ where there is only one set of registers for 13468 * switching between high and low RR. 13469 * This macro can be used whenever a comparison has to be made between one 13470 * hw state and multiple sw state variables. 13471 */ 13472 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \ 13473 if (!intel_compare_link_m_n(&current_config->name, \ 13474 &pipe_config->name, !fastset) && \ 13475 !intel_compare_link_m_n(&current_config->alt_name, \ 13476 &pipe_config->name, !fastset)) { \ 13477 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13478 "(expected tu %i gmch %i/%i link %i/%i, " \ 13479 "or tu %i gmch %i/%i link %i/%i, " \ 13480 "found tu %i, gmch %i/%i link %i/%i)", \ 13481 current_config->name.tu, \ 13482 current_config->name.gmch_m, \ 13483 current_config->name.gmch_n, \ 13484 current_config->name.link_m, \ 13485 current_config->name.link_n, \ 13486 current_config->alt_name.tu, \ 13487 current_config->alt_name.gmch_m, \ 13488 current_config->alt_name.gmch_n, \ 13489 current_config->alt_name.link_m, \ 13490 current_config->alt_name.link_n, \ 13491 pipe_config->name.tu, \ 13492 pipe_config->name.gmch_m, \ 13493 pipe_config->name.gmch_n, \ 13494 pipe_config->name.link_m, \ 13495 pipe_config->name.link_n); \ 13496 ret = false; \ 13497 } \ 13498 } while (0) 13499 13500 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 13501 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 13502 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13503 "(%x) (expected %i, found %i)", \ 13504 (mask), \ 13505 current_config->name & (mask), \ 13506 pipe_config->name & (mask)); \ 13507 ret = false; \ 13508 } \ 13509 } while (0) 13510 13511 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \ 13512 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 13513 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13514 "(expected %i, found %i)", \ 13515 current_config->name, \ 13516 pipe_config->name); \ 13517 ret = false; \ 13518 } \ 13519 } while (0) 13520
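/*
 * A worked example of the tolerance used by PIPE_CONF_CHECK_CLOCK_FUZZY
 * above (see intel_fuzzy_clock_check()): the clocks are considered equal
 * when diff * 100 < 5 * (clock1 + clock2), i.e. when the difference is
 * below 5% of the combined clocks, or roughly +/-10% of either clock
 * when the two are close. E.g. 100000 vs. 108000 kHz yields
 * (8000 + 208000) * 100 / 208000 = 103 < 105 and matches, while
 * 100000 vs. 112000 kHz yields (12000 + 212000) * 100 / 212000 = 105
 * and does not.
 */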
13521 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ 13522 if (!intel_compare_infoframe(&current_config->infoframes.name, \ 13523 &pipe_config->infoframes.name)) { \ 13524 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \ 13525 &current_config->infoframes.name, \ 13526 &pipe_config->infoframes.name); \ 13527 ret = false; \ 13528 } \ 13529 } while (0) 13530 13531 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \ 13532 if (current_config->name1 != pipe_config->name1) { \ 13533 pipe_config_mismatch(fastset, crtc, __stringify(name1), \ 13534 "(expected %i, found %i, won't compare lut values)", \ 13535 current_config->name1, \ 13536 pipe_config->name1); \ 13537 ret = false;\ 13538 } else { \ 13539 if (!intel_color_lut_equal(current_config->name2, \ 13540 pipe_config->name2, pipe_config->name1, \ 13541 bit_precision)) { \ 13542 pipe_config_mismatch(fastset, crtc, __stringify(name2), \ 13543 "hw_state doesn't match sw_state"); \ 13544 ret = false; \ 13545 } \ 13546 } \ 13547 } while (0) 13548 13549 #define PIPE_CONF_QUIRK(quirk) \ 13550 ((current_config->quirks | pipe_config->quirks) & (quirk)) 13551 13552 PIPE_CONF_CHECK_I(cpu_transcoder); 13553 13554 PIPE_CONF_CHECK_BOOL(has_pch_encoder); 13555 PIPE_CONF_CHECK_I(fdi_lanes); 13556 PIPE_CONF_CHECK_M_N(fdi_m_n); 13557 13558 PIPE_CONF_CHECK_I(lane_count); 13559 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 13560 13561 if (INTEL_GEN(dev_priv) < 8) { 13562 PIPE_CONF_CHECK_M_N(dp_m_n); 13563 13564 if (current_config->has_drrs) 13565 PIPE_CONF_CHECK_M_N(dp_m2_n2); 13566 } else 13567 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 13568 13569 PIPE_CONF_CHECK_X(output_types); 13570 13571 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); 13572 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); 13573 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); 13574 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); 13575 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); 13576 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); 13577 13578 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); 13579 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); 13580 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); 13581 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); 13582 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); 13583 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); 13584 13585 PIPE_CONF_CHECK_I(pixel_multiplier); 13586 PIPE_CONF_CHECK_I(output_format); 13587 PIPE_CONF_CHECK_I(dc3co_exitline); 13588 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 13589 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 13590 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13591 PIPE_CONF_CHECK_BOOL(limited_color_range); 13592 13593 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 13594 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 13595 PIPE_CONF_CHECK_BOOL(has_infoframe); 13596 PIPE_CONF_CHECK_BOOL(fec_enable); 13597 13598 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 13599 13600 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13601 DRM_MODE_FLAG_INTERLACE); 13602 13603 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 13604 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13605 DRM_MODE_FLAG_PHSYNC); 13606 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13607 DRM_MODE_FLAG_NHSYNC); 13608 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13609 DRM_MODE_FLAG_PVSYNC); 13610 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13611 DRM_MODE_FLAG_NVSYNC); 13612 } 13613 13614 PIPE_CONF_CHECK_X(gmch_pfit.control); 13615 /* pfit ratios are autocomputed by the hw on 
gen4+ */ 13616 if (INTEL_GEN(dev_priv) < 4) 13617 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 13618 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 13619 13620 /* 13621 * Changing the EDP transcoder input mux 13622 * (A_ONOFF vs. A_ON) requires a full modeset. 13623 */ 13624 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 13625 13626 if (!fastset) { 13627 PIPE_CONF_CHECK_I(pipe_src_w); 13628 PIPE_CONF_CHECK_I(pipe_src_h); 13629 13630 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 13631 if (current_config->pch_pfit.enabled) { 13632 PIPE_CONF_CHECK_X(pch_pfit.pos); 13633 PIPE_CONF_CHECK_X(pch_pfit.size); 13634 } 13635 13636 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 13637 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 13638 13639 PIPE_CONF_CHECK_X(gamma_mode); 13640 if (IS_CHERRYVIEW(dev_priv)) 13641 PIPE_CONF_CHECK_X(cgm_mode); 13642 else 13643 PIPE_CONF_CHECK_X(csc_mode); 13644 PIPE_CONF_CHECK_BOOL(gamma_enable); 13645 PIPE_CONF_CHECK_BOOL(csc_enable); 13646 13647 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); 13648 if (bp_gamma) 13649 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma); 13650 13651 } 13652 13653 PIPE_CONF_CHECK_BOOL(double_wide); 13654 13655 PIPE_CONF_CHECK_P(shared_dpll); 13656 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 13657 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 13658 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 13659 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 13660 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 13661 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 13662 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 13663 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 13664 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 13665 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 13666 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 13667 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 13668 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 13669 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 13670 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 13671 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 13672 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 13673 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 13674 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 13675 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 13676 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 13677 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 13678 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 13679 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 13680 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 13681 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 13682 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 13683 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 13684 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 13685 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 13686 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 13687 13688 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 13689 PIPE_CONF_CHECK_X(dsi_pll.div); 13690 13691 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 13692 PIPE_CONF_CHECK_I(pipe_bpp); 13693 13694 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); 13695 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 13696 13697 PIPE_CONF_CHECK_I(min_voltage_level); 13698 13699 PIPE_CONF_CHECK_X(infoframes.enable); 13700 PIPE_CONF_CHECK_X(infoframes.gcp); 13701 PIPE_CONF_CHECK_INFOFRAME(avi); 13702 PIPE_CONF_CHECK_INFOFRAME(spd); 13703 PIPE_CONF_CHECK_INFOFRAME(hdmi); 13704 PIPE_CONF_CHECK_INFOFRAME(drm); 13705 13706 PIPE_CONF_CHECK_I(sync_mode_slaves_mask); 13707 PIPE_CONF_CHECK_I(master_transcoder); 13708 13709 PIPE_CONF_CHECK_I(dsc.compression_enable); 13710 PIPE_CONF_CHECK_I(dsc.dsc_split); 13711 PIPE_CONF_CHECK_I(dsc.compressed_bpp); 
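/*
 * Aside on the dpll_hw_state checks above: every PLL flavour the driver
 * tracks is compared dword by dword (e.g. the i9xx dpll/fp words, the
 * HSW wrpll/spll words, the SKL cfgcr words, the BXT ebb/pll words and
 * the ICL mg_* words). Since the states are zero-initialized, fields a
 * given platform never writes simply compare equal, so the whole list
 * can be checked unconditionally.
 */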
13712 13713 PIPE_CONF_CHECK_I(mst_master_transcoder); 13714 13715 #undef PIPE_CONF_CHECK_X 13716 #undef PIPE_CONF_CHECK_I 13717 #undef PIPE_CONF_CHECK_BOOL 13718 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 13719 #undef PIPE_CONF_CHECK_P 13720 #undef PIPE_CONF_CHECK_FLAGS 13721 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 13722 #undef PIPE_CONF_CHECK_COLOR_LUT 13723 #undef PIPE_CONF_QUIRK 13724 13725 return ret; 13726 } 13727 13728 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 13729 const struct intel_crtc_state *pipe_config) 13730 { 13731 if (pipe_config->has_pch_encoder) { 13732 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 13733 &pipe_config->fdi_m_n); 13734 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock; 13735 13736 /* 13737 * FDI already provided one idea for the dotclock. 13738 * Yell if the encoder disagrees. 13739 */ 13740 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock), 13741 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 13742 fdi_dotclock, dotclock); 13743 } 13744 } 13745 13746 static void verify_wm_state(struct intel_crtc *crtc, 13747 struct intel_crtc_state *new_crtc_state) 13748 { 13749 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13750 struct skl_hw_state { 13751 struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; 13752 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; 13753 struct skl_ddb_allocation ddb; 13754 struct skl_pipe_wm wm; 13755 } *hw; 13756 struct skl_ddb_allocation *sw_ddb; 13757 struct skl_pipe_wm *sw_wm; 13758 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 13759 const enum pipe pipe = crtc->pipe; 13760 int plane, level, max_level = ilk_wm_max_level(dev_priv); 13761 13762 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active) 13763 return; 13764 13765 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 13766 if (!hw) 13767 return; 13768 13769 skl_pipe_wm_get_hw_state(crtc, &hw->wm); 13770 sw_wm = &new_crtc_state->wm.skl.optimal; 13771 13772 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); 13773 13774 skl_ddb_get_hw_state(dev_priv, &hw->ddb); 13775 sw_ddb = &dev_priv->wm.skl_hw.ddb; 13776 13777 if (INTEL_GEN(dev_priv) >= 11 && 13778 hw->ddb.enabled_slices != sw_ddb->enabled_slices) 13779 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n", 13780 sw_ddb->enabled_slices, 13781 hw->ddb.enabled_slices); 13782 13783 /* planes */ 13784 for_each_universal_plane(dev_priv, pipe, plane) { 13785 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 13786 13787 hw_plane_wm = &hw->wm.planes[plane]; 13788 sw_plane_wm = &sw_wm->planes[plane]; 13789 13790 /* Watermarks */ 13791 for (level = 0; level <= max_level; level++) { 13792 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 13793 &sw_plane_wm->wm[level])) 13794 continue; 13795 13796 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13797 pipe_name(pipe), plane + 1, level, 13798 sw_plane_wm->wm[level].plane_en, 13799 sw_plane_wm->wm[level].plane_res_b, 13800 sw_plane_wm->wm[level].plane_res_l, 13801 hw_plane_wm->wm[level].plane_en, 13802 hw_plane_wm->wm[level].plane_res_b, 13803 hw_plane_wm->wm[level].plane_res_l); 13804 } 13805 13806 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 13807 &sw_plane_wm->trans_wm)) { 13808 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13809 pipe_name(pipe), plane + 1, 13810 sw_plane_wm->trans_wm.plane_en, 13811 sw_plane_wm->trans_wm.plane_res_b, 13812 sw_plane_wm->trans_wm.plane_res_l, 
13813 hw_plane_wm->trans_wm.plane_en, 13814 hw_plane_wm->trans_wm.plane_res_b, 13815 hw_plane_wm->trans_wm.plane_res_l); 13816 } 13817 13818 /* DDB */ 13819 hw_ddb_entry = &hw->ddb_y[plane]; 13820 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane]; 13821 13822 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 13823 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", 13824 pipe_name(pipe), plane + 1, 13825 sw_ddb_entry->start, sw_ddb_entry->end, 13826 hw_ddb_entry->start, hw_ddb_entry->end); 13827 } 13828 } 13829 13830 /* 13831 * cursor 13832 * If the cursor plane isn't active, we may not have updated its ddb 13833 * allocation. In that case, since the ddb allocation will be updated 13834 * once the plane becomes visible, we can skip this check. 13835 */ 13836 if (1) { 13837 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 13838 13839 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR]; 13840 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; 13841 13842 /* Watermarks */ 13843 for (level = 0; level <= max_level; level++) { 13844 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 13845 &sw_plane_wm->wm[level])) 13846 continue; 13847 13848 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13849 pipe_name(pipe), level, 13850 sw_plane_wm->wm[level].plane_en, 13851 sw_plane_wm->wm[level].plane_res_b, 13852 sw_plane_wm->wm[level].plane_res_l, 13853 hw_plane_wm->wm[level].plane_en, 13854 hw_plane_wm->wm[level].plane_res_b, 13855 hw_plane_wm->wm[level].plane_res_l); 13856 } 13857 13858 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 13859 &sw_plane_wm->trans_wm)) { 13860 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 13861 pipe_name(pipe), 13862 sw_plane_wm->trans_wm.plane_en, 13863 sw_plane_wm->trans_wm.plane_res_b, 13864 sw_plane_wm->trans_wm.plane_res_l, 13865 hw_plane_wm->trans_wm.plane_en, 13866 hw_plane_wm->trans_wm.plane_res_b, 13867 hw_plane_wm->trans_wm.plane_res_l); 13868 } 13869 13870 /* DDB */ 13871 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; 13872 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; 13873 13874 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 13875 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", 13876 pipe_name(pipe), 13877 sw_ddb_entry->start, sw_ddb_entry->end, 13878 hw_ddb_entry->start, hw_ddb_entry->end); 13879 } 13880 } 13881 13882 kfree(hw); 13883 } 13884 13885 static void 13886 verify_connector_state(struct intel_atomic_state *state, 13887 struct intel_crtc *crtc) 13888 { 13889 struct drm_connector *connector; 13890 struct drm_connector_state *new_conn_state; 13891 int i; 13892 13893 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { 13894 struct drm_encoder *encoder = connector->encoder; 13895 struct intel_crtc_state *crtc_state = NULL; 13896 13897 if (new_conn_state->crtc != &crtc->base) 13898 continue; 13899 13900 if (crtc) 13901 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 13902 13903 intel_connector_verify_state(crtc_state, new_conn_state); 13904 13905 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 13906 "connector's atomic encoder doesn't match legacy encoder\n"); 13907 } 13908 } 13909 13910 static void 13911 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) 13912 { 13913 struct intel_encoder *encoder; 13914 struct drm_connector *connector; 13915 struct drm_connector_state 
*old_conn_state, *new_conn_state; 13916 int i; 13917 13918 for_each_intel_encoder(&dev_priv->drm, encoder) { 13919 bool enabled = false, found = false; 13920 enum pipe pipe; 13921 13922 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 13923 encoder->base.base.id, 13924 encoder->base.name); 13925 13926 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, 13927 new_conn_state, i) { 13928 if (old_conn_state->best_encoder == &encoder->base) 13929 found = true; 13930 13931 if (new_conn_state->best_encoder != &encoder->base) 13932 continue; 13933 found = enabled = true; 13934 13935 I915_STATE_WARN(new_conn_state->crtc != 13936 encoder->base.crtc, 13937 "connector's crtc doesn't match encoder crtc\n"); 13938 } 13939 13940 if (!found) 13941 continue; 13942 13943 I915_STATE_WARN(!!encoder->base.crtc != enabled, 13944 "encoder's enabled state mismatch " 13945 "(expected %i, found %i)\n", 13946 !!encoder->base.crtc, enabled); 13947 13948 if (!encoder->base.crtc) { 13949 bool active; 13950 13951 active = encoder->get_hw_state(encoder, &pipe); 13952 I915_STATE_WARN(active, 13953 "encoder detached but still enabled on pipe %c.\n", 13954 pipe_name(pipe)); 13955 } 13956 } 13957 } 13958 13959 static void 13960 verify_crtc_state(struct intel_crtc *crtc, 13961 struct intel_crtc_state *old_crtc_state, 13962 struct intel_crtc_state *new_crtc_state) 13963 { 13964 struct drm_device *dev = crtc->base.dev; 13965 struct drm_i915_private *dev_priv = to_i915(dev); 13966 struct intel_encoder *encoder; 13967 struct intel_crtc_state *pipe_config = old_crtc_state; 13968 struct drm_atomic_state *state = old_crtc_state->uapi.state; 13969 bool active; 13970 13971 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); 13972 intel_crtc_free_hw_state(old_crtc_state); 13973 intel_crtc_state_reset(old_crtc_state, crtc); 13974 old_crtc_state->uapi.state = state; 13975 13976 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); 13977 13978 active = dev_priv->display.get_pipe_config(crtc, pipe_config); 13979 13980 /* we keep both pipes enabled on 830 */ 13981 if (IS_I830(dev_priv)) 13982 active = new_crtc_state->hw.active; 13983 13984 I915_STATE_WARN(new_crtc_state->hw.active != active, 13985 "crtc active state doesn't match with hw state " 13986 "(expected %i, found %i)\n", 13987 new_crtc_state->hw.active, active); 13988 13989 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active, 13990 "transitional active state does not match atomic hw state " 13991 "(expected %i, found %i)\n", 13992 new_crtc_state->hw.active, crtc->active); 13993 13994 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 13995 enum pipe pipe; 13996 13997 active = encoder->get_hw_state(encoder, &pipe); 13998 I915_STATE_WARN(active != new_crtc_state->hw.active, 13999 "[ENCODER:%i] active %i with crtc active %i\n", 14000 encoder->base.base.id, active, 14001 new_crtc_state->hw.active); 14002 14003 I915_STATE_WARN(active && crtc->pipe != pipe, 14004 "Encoder connected to wrong pipe %c\n", 14005 pipe_name(pipe)); 14006 14007 if (active) 14008 encoder->get_config(encoder, pipe_config); 14009 } 14010 14011 intel_crtc_compute_pixel_rate(pipe_config); 14012 14013 if (!new_crtc_state->hw.active) 14014 return; 14015 14016 intel_pipe_config_sanity_check(dev_priv, pipe_config); 14017 14018 if (!intel_pipe_config_compare(new_crtc_state, 14019 pipe_config, false)) { 14020 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 14021 intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); 14022 intel_dump_pipe_config(new_crtc_state, NULL, "[sw 
state]"); 14023 } 14024 } 14025 14026 static void 14027 intel_verify_planes(struct intel_atomic_state *state) 14028 { 14029 struct intel_plane *plane; 14030 const struct intel_plane_state *plane_state; 14031 int i; 14032 14033 for_each_new_intel_plane_in_state(state, plane, 14034 plane_state, i) 14035 assert_plane(plane, plane_state->planar_slave || 14036 plane_state->uapi.visible); 14037 } 14038 14039 static void 14040 verify_single_dpll_state(struct drm_i915_private *dev_priv, 14041 struct intel_shared_dpll *pll, 14042 struct intel_crtc *crtc, 14043 struct intel_crtc_state *new_crtc_state) 14044 { 14045 struct intel_dpll_hw_state dpll_hw_state; 14046 unsigned int crtc_mask; 14047 bool active; 14048 14049 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 14050 14051 DRM_DEBUG_KMS("%s\n", pll->info->name); 14052 14053 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state); 14054 14055 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { 14056 I915_STATE_WARN(!pll->on && pll->active_mask, 14057 "pll in active use but not on in sw tracking\n"); 14058 I915_STATE_WARN(pll->on && !pll->active_mask, 14059 "pll is on but not used by any active crtc\n"); 14060 I915_STATE_WARN(pll->on != active, 14061 "pll on state mismatch (expected %i, found %i)\n", 14062 pll->on, active); 14063 } 14064 14065 if (!crtc) { 14066 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, 14067 "more active pll users than references: %x vs %x\n", 14068 pll->active_mask, pll->state.crtc_mask); 14069 14070 return; 14071 } 14072 14073 crtc_mask = drm_crtc_mask(&crtc->base); 14074 14075 if (new_crtc_state->hw.active) 14076 I915_STATE_WARN(!(pll->active_mask & crtc_mask), 14077 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", 14078 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 14079 else 14080 I915_STATE_WARN(pll->active_mask & crtc_mask, 14081 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", 14082 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); 14083 14084 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), 14085 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", 14086 crtc_mask, pll->state.crtc_mask); 14087 14088 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 14089 &dpll_hw_state, 14090 sizeof(dpll_hw_state)), 14091 "pll hw state mismatch\n"); 14092 } 14093 14094 static void 14095 verify_shared_dpll_state(struct intel_crtc *crtc, 14096 struct intel_crtc_state *old_crtc_state, 14097 struct intel_crtc_state *new_crtc_state) 14098 { 14099 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14100 14101 if (new_crtc_state->shared_dpll) 14102 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state); 14103 14104 if (old_crtc_state->shared_dpll && 14105 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { 14106 unsigned int crtc_mask = drm_crtc_mask(&crtc->base); 14107 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; 14108 14109 I915_STATE_WARN(pll->active_mask & crtc_mask, 14110 "pll active mismatch (didn't expect pipe %c in active mask)\n", 14111 pipe_name(drm_crtc_index(&crtc->base))); 14112 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, 14113 "pll enabled crtcs mismatch (found %x in enabled mask)\n", 14114 pipe_name(drm_crtc_index(&crtc->base))); 14115 } 14116 } 14117 14118 static void 14119 intel_modeset_verify_crtc(struct intel_crtc *crtc, 14120 struct intel_atomic_state *state, 14121 struct intel_crtc_state *old_crtc_state, 14122 struct intel_crtc_state 
*new_crtc_state) 14123 { 14124 if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) 14125 return; 14126 14127 verify_wm_state(crtc, new_crtc_state); 14128 verify_connector_state(state, crtc); 14129 verify_crtc_state(crtc, old_crtc_state, new_crtc_state); 14130 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state); 14131 } 14132 14133 static void 14134 verify_disabled_dpll_state(struct drm_i915_private *dev_priv) 14135 { 14136 int i; 14137 14138 for (i = 0; i < dev_priv->num_shared_dpll; i++) 14139 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); 14140 } 14141 14142 static void 14143 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, 14144 struct intel_atomic_state *state) 14145 { 14146 verify_encoder_state(dev_priv, state); 14147 verify_connector_state(state, NULL); 14148 verify_disabled_dpll_state(dev_priv); 14149 } 14150 14151 static void 14152 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) 14153 { 14154 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 14155 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14156 const struct drm_display_mode *adjusted_mode = 14157 &crtc_state->hw.adjusted_mode; 14158 14159 drm_calc_timestamping_constants(&crtc->base, adjusted_mode); 14160 14161 /* 14162 * The scanline counter increments at the leading edge of hsync. 14163 * 14164 * On most platforms it starts counting from vtotal-1 on the 14165 * first active line. That means the scanline counter value is 14166 * always one less than what we would expect. Ie. just after 14167 * start of vblank, which also occurs at start of hsync (on the 14168 * last active line), the scanline counter will read vblank_start-1. 14169 * 14170 * On gen2 the scanline counter starts counting from 1 instead 14171 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 14172 * to keep the value positive), instead of adding one. 14173 * 14174 * On HSW+ the behaviour of the scanline counter depends on the output 14175 * type. For DP ports it behaves like most other platforms, but on HDMI 14176 * there's an extra 1 line difference. So we need to add two instead of 14177 * one to the value. 14178 * 14179 * On VLV/CHV DSI the scanline counter would appear to increment 14180 * approx. 1/3 of a scanline before start of vblank. Unfortunately 14181 * that means we can't tell whether we're in vblank or not while 14182 * we're on that particular line. We must still set scanline_offset 14183 * to 1 so that the vblank timestamps come out correct when we query 14184 * the scanline counter from within the vblank interrupt handler. 14185 * However if queried just before the start of vblank we'll get an 14186 * answer that's slightly in the future. 
14187 */ 14188 if (IS_GEN(dev_priv, 2)) { 14189 int vtotal; 14190 14191 vtotal = adjusted_mode->crtc_vtotal; 14192 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 14193 vtotal /= 2; 14194 14195 crtc->scanline_offset = vtotal - 1; 14196 } else if (HAS_DDI(dev_priv) && 14197 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 14198 crtc->scanline_offset = 2; 14199 } else { 14200 crtc->scanline_offset = 1; 14201 } 14202 } 14203 14204 static void intel_modeset_clear_plls(struct intel_atomic_state *state) 14205 { 14206 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14207 struct intel_crtc_state *new_crtc_state; 14208 struct intel_crtc *crtc; 14209 int i; 14210 14211 if (!dev_priv->display.crtc_compute_clock) 14212 return; 14213 14214 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14215 if (!needs_modeset(new_crtc_state)) 14216 continue; 14217 14218 intel_release_shared_dplls(state, crtc); 14219 } 14220 } 14221 14222 /* 14223 * This implements the workaround described in the "notes" section of the mode 14224 * set sequence documentation. When going from no pipes or single pipe to 14225 * multiple pipes, and planes are enabled after the pipe, we need to wait at 14226 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 14227 */ 14228 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) 14229 { 14230 struct intel_crtc_state *crtc_state; 14231 struct intel_crtc *crtc; 14232 struct intel_crtc_state *first_crtc_state = NULL; 14233 struct intel_crtc_state *other_crtc_state = NULL; 14234 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 14235 int i; 14236 14237 /* look at all crtcs that are going to be enabled during the modeset */ 14238 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 14239 if (!crtc_state->hw.active || 14240 !needs_modeset(crtc_state)) 14241 continue; 14242 14243 if (first_crtc_state) { 14244 other_crtc_state = crtc_state; 14245 break; 14246 } else { 14247 first_crtc_state = crtc_state; 14248 first_pipe = crtc->pipe; 14249 } 14250 } 14251 14252 /* No workaround needed? */ 14253 if (!first_crtc_state) 14254 return 0; 14255 14256 /* w/a possibly needed, check how many crtcs are already enabled. 
*/ 14257 for_each_intel_crtc(state->base.dev, crtc) { 14258 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 14259 if (IS_ERR(crtc_state)) 14260 return PTR_ERR(crtc_state); 14261 14262 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 14263 14264 if (!crtc_state->hw.active || 14265 needs_modeset(crtc_state)) 14266 continue; 14267 14268 /* 2 or more enabled crtcs means no need for w/a */ 14269 if (enabled_pipe != INVALID_PIPE) 14270 return 0; 14271 14272 enabled_pipe = crtc->pipe; 14273 } 14274 14275 if (enabled_pipe != INVALID_PIPE) 14276 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 14277 else if (other_crtc_state) 14278 other_crtc_state->hsw_workaround_pipe = first_pipe; 14279 14280 return 0; 14281 } 14282 14283 static int intel_modeset_checks(struct intel_atomic_state *state) 14284 { 14285 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14286 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14287 struct intel_crtc *crtc; 14288 int ret, i; 14289 14290 /* keep the current setting */ 14291 if (!state->cdclk.force_min_cdclk_changed) 14292 state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk; 14293 14294 state->modeset = true; 14295 state->active_pipes = dev_priv->active_pipes; 14296 state->cdclk.logical = dev_priv->cdclk.logical; 14297 state->cdclk.actual = dev_priv->cdclk.actual; 14298 14299 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14300 new_crtc_state, i) { 14301 if (new_crtc_state->hw.active) 14302 state->active_pipes |= BIT(crtc->pipe); 14303 else 14304 state->active_pipes &= ~BIT(crtc->pipe); 14305 14306 if (old_crtc_state->hw.active != new_crtc_state->hw.active) 14307 state->active_pipe_changes |= BIT(crtc->pipe); 14308 } 14309 14310 if (state->active_pipe_changes) { 14311 ret = intel_atomic_lock_global_state(state); 14312 if (ret) 14313 return ret; 14314 } 14315 14316 ret = intel_modeset_calc_cdclk(state); 14317 if (ret) 14318 return ret; 14319 14320 intel_modeset_clear_plls(state); 14321 14322 if (IS_HASWELL(dev_priv)) 14323 return hsw_mode_set_planes_workaround(state); 14324 14325 return 0; 14326 } 14327 14328 /* 14329 * Handle calculation of various watermark data at the end of the atomic check 14330 * phase. The code here should be run after the per-crtc and per-plane 'check' 14331 * handlers to ensure that all derived state has been updated. 14332 */ 14333 static int calc_watermark_data(struct intel_atomic_state *state) 14334 { 14335 struct drm_device *dev = state->base.dev; 14336 struct drm_i915_private *dev_priv = to_i915(dev); 14337 14338 /* Is there platform-specific watermark information to calculate? */ 14339 if (dev_priv->display.compute_global_watermarks) 14340 return dev_priv->display.compute_global_watermarks(state); 14341 14342 return 0; 14343 } 14344 14345 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 14346 struct intel_crtc_state *new_crtc_state) 14347 { 14348 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 14349 return; 14350 14351 new_crtc_state->uapi.mode_changed = false; 14352 new_crtc_state->update_pipe = true; 14353 } 14354 14355 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state, 14356 struct intel_crtc_state *new_crtc_state) 14357 { 14358 /* 14359 * If we're not doing the full modeset we want to 14360 * keep the current M/N values as they may be 14361 * sufficiently different to the computed values 14362 * to cause problems. 
14363 * 14364 * FIXME: should really copy more fuzzy state here 14365 */ 14366 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; 14367 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; 14368 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; 14369 new_crtc_state->has_drrs = old_crtc_state->has_drrs; 14370 } 14371 14372 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 14373 struct intel_crtc *crtc, 14374 u8 plane_ids_mask) 14375 { 14376 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14377 struct intel_plane *plane; 14378 14379 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 14380 struct intel_plane_state *plane_state; 14381 14382 if ((plane_ids_mask & BIT(plane->id)) == 0) 14383 continue; 14384 14385 plane_state = intel_atomic_get_plane_state(state, plane); 14386 if (IS_ERR(plane_state)) 14387 return PTR_ERR(plane_state); 14388 } 14389 14390 return 0; 14391 } 14392 14393 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 14394 { 14395 /* See {hsw,vlv,ivb}_plane_ratio() */ 14396 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 14397 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 14398 IS_IVYBRIDGE(dev_priv); 14399 } 14400 14401 static int intel_atomic_check_planes(struct intel_atomic_state *state, 14402 bool *need_modeset) 14403 { 14404 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14405 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14406 struct intel_plane_state *plane_state; 14407 struct intel_plane *plane; 14408 struct intel_crtc *crtc; 14409 int i, ret; 14410 14411 ret = icl_add_linked_planes(state); 14412 if (ret) 14413 return ret; 14414 14415 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 14416 ret = intel_plane_atomic_check(state, plane); 14417 if (ret) { 14418 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n", 14419 plane->base.base.id, plane->base.name); 14420 return ret; 14421 } 14422 } 14423 14424 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14425 new_crtc_state, i) { 14426 u8 old_active_planes, new_active_planes; 14427 14428 ret = icl_check_nv12_planes(new_crtc_state); 14429 if (ret) 14430 return ret; 14431 14432 /* 14433 * On some platforms the number of active planes affects 14434 * the planes' minimum cdclk calculation. Add such planes 14435 * to the state before we compute the minimum cdclk. 14436 */ 14437 if (!active_planes_affects_min_cdclk(dev_priv)) 14438 continue; 14439 14440 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 14441 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 14442 14443 if (hweight8(old_active_planes) == hweight8(new_active_planes)) 14444 continue; 14445 14446 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 14447 if (ret) 14448 return ret; 14449 } 14450 14451 /* 14452 * active_planes bitmask has been updated, and potentially 14453 * affected planes are part of the state. We can now 14454 * compute the minimum cdclk for each plane. 
14455 */ 14456 for_each_new_intel_plane_in_state(state, plane, plane_state, i) 14457 *need_modeset |= intel_plane_calc_min_cdclk(state, plane); 14458 14459 return 0; 14460 } 14461 14462 static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 14463 { 14464 struct intel_crtc_state *crtc_state; 14465 struct intel_crtc *crtc; 14466 int i; 14467 14468 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 14469 int ret = intel_crtc_atomic_check(state, crtc); 14470 if (ret) { 14471 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", 14472 crtc->base.base.id, crtc->base.name); 14473 return ret; 14474 } 14475 } 14476 14477 return 0; 14478 } 14479 14480 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, 14481 u8 transcoders) 14482 { 14483 const struct intel_crtc_state *new_crtc_state; 14484 struct intel_crtc *crtc; 14485 int i; 14486 14487 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14488 if (new_crtc_state->hw.enable && 14489 transcoders & BIT(new_crtc_state->cpu_transcoder) && 14490 needs_modeset(new_crtc_state)) 14491 return true; 14492 } 14493 14494 return false; 14495 } 14496 14497 static int 14498 intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id) 14499 { 14500 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14501 struct drm_connector *connector; 14502 struct drm_connector_list_iter conn_iter; 14503 int ret = 0; 14504 14505 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 14506 drm_for_each_connector_iter(connector, &conn_iter) { 14507 struct drm_connector_state *conn_state; 14508 struct drm_crtc_state *crtc_state; 14509 14510 if (!connector->has_tile || 14511 connector->tile_group->id != tile_grp_id) 14512 continue; 14513 conn_state = drm_atomic_get_connector_state(&state->base, 14514 connector); 14515 if (IS_ERR(conn_state)) { 14516 ret = PTR_ERR(conn_state); 14517 break; 14518 } 14519 14520 if (!conn_state->crtc) 14521 continue; 14522 14523 crtc_state = drm_atomic_get_crtc_state(&state->base, 14524 conn_state->crtc); 14525 if (IS_ERR(crtc_state)) { 14526 ret = PTR_ERR(crtc_state); 14527 break; 14528 } 14529 crtc_state->mode_changed = true; 14530 ret = drm_atomic_add_affected_connectors(&state->base, 14531 conn_state->crtc); 14532 if (ret) 14533 break; 14534 } 14535 drm_connector_list_iter_end(&conn_iter); 14536 14537 return ret; 14538 } 14539 14540 static int 14541 intel_atomic_check_tiled_conns(struct intel_atomic_state *state) 14542 { 14543 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14544 struct drm_connector *connector; 14545 struct drm_connector_state *old_conn_state, *new_conn_state; 14546 int i, ret; 14547 14548 if (INTEL_GEN(dev_priv) < 11) 14549 return 0; 14550 14551 /* Is tiled, mark all other tiled CRTCs as needing a modeset */ 14552 for_each_oldnew_connector_in_state(&state->base, connector, 14553 old_conn_state, new_conn_state, i) { 14554 if (!connector->has_tile) 14555 continue; 14556 if (!intel_connector_needs_modeset(state, connector)) 14557 continue; 14558 14559 ret = intel_modeset_all_tiles(state, connector->tile_group->id); 14560 if (ret) 14561 return ret; 14562 } 14563 14564 return 0; 14565 } 14566 14567 /** 14568 * intel_atomic_check - validate state object 14569 * @dev: drm device 14570 * @_state: state to validate 14571 */ 14572 static int intel_atomic_check(struct drm_device *dev, 14573 struct drm_atomic_state *_state) 14574 { 14575 struct drm_i915_private *dev_priv = to_i915(dev); 14576 struct intel_atomic_state 
*state = to_intel_atomic_state(_state); 14577 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14578 struct intel_crtc *crtc; 14579 int ret, i; 14580 bool any_ms = false; 14581 14582 /* Catch I915_MODE_FLAG_INHERITED */ 14583 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14584 new_crtc_state, i) { 14585 if (new_crtc_state->hw.mode.private_flags != 14586 old_crtc_state->hw.mode.private_flags) 14587 new_crtc_state->uapi.mode_changed = true; 14588 } 14589 14590 ret = drm_atomic_helper_check_modeset(dev, &state->base); 14591 if (ret) 14592 goto fail; 14593 14594 /* 14595 * This check adds all the connectors in the current state that belong to 14596 * the same tile group to a full modeset. 14597 * It sets mode_changed to true directly and also calls 14598 * drm_atomic_add_affected_connectors(). Hence we do not explicitly 14599 * call drm_atomic_helper_check_modeset() again after this. 14600 * 14601 * FIXME: Handle corner cases where one of the 14602 * tiled connectors gets disconnected and its tile info is lost, but since 14603 * it was previously synced to another connector we need to add it to the modeset. 14604 */ 14605 ret = intel_atomic_check_tiled_conns(state); 14606 if (ret) 14607 goto fail; 14608 14609 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14610 new_crtc_state, i) { 14611 if (!needs_modeset(new_crtc_state)) { 14612 /* Light copy */ 14613 intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state); 14614 14615 continue; 14616 } 14617 14618 if (!new_crtc_state->uapi.enable) { 14619 intel_crtc_copy_uapi_to_hw_state(new_crtc_state); 14620 continue; 14621 } 14622 14623 ret = intel_crtc_prepare_cleared_state(new_crtc_state); 14624 if (ret) 14625 goto fail; 14626 14627 ret = intel_modeset_pipe_config(new_crtc_state); 14628 if (ret) 14629 goto fail; 14630 14631 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 14632 } 14633 14634 /* 14635 * Check if fastset is allowed by external dependencies like other 14636 * pipes and transcoders. 14637 * 14638 * Right now it only forces a fullmodeset when the MST master 14639 * transcoder did not change but the pipe of the master transcoder 14640 * needs a fullmodeset, so all slaves also need to do a fullmodeset; 14641 * and in case of port synced crtcs, if one of the synced crtcs 14642 * needs a full modeset, all other synced crtcs should be 14643 * forced to do a full modeset.
14644 */ 14645 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14646 if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state)) 14647 continue; 14648 14649 if (intel_dp_mst_is_slave_trans(new_crtc_state)) { 14650 enum transcoder master = new_crtc_state->mst_master_transcoder; 14651 14652 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) { 14653 new_crtc_state->uapi.mode_changed = true; 14654 new_crtc_state->update_pipe = false; 14655 } 14656 } 14657 14658 if (is_trans_port_sync_mode(new_crtc_state)) { 14659 u8 trans = new_crtc_state->sync_mode_slaves_mask | 14660 BIT(new_crtc_state->master_transcoder); 14661 14662 if (intel_cpu_transcoders_need_modeset(state, trans)) { 14663 new_crtc_state->uapi.mode_changed = true; 14664 new_crtc_state->update_pipe = false; 14665 } 14666 } 14667 } 14668 14669 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14670 new_crtc_state, i) { 14671 if (needs_modeset(new_crtc_state)) { 14672 any_ms = true; 14673 continue; 14674 } 14675 14676 if (!new_crtc_state->update_pipe) 14677 continue; 14678 14679 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state); 14680 } 14681 14682 if (any_ms && !check_digital_port_conflicts(state)) { 14683 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); 14684 ret = EINVAL; 14685 goto fail; 14686 } 14687 14688 ret = drm_dp_mst_atomic_check(&state->base); 14689 if (ret) 14690 goto fail; 14691 14692 any_ms |= state->cdclk.force_min_cdclk_changed; 14693 14694 ret = intel_atomic_check_planes(state, &any_ms); 14695 if (ret) 14696 goto fail; 14697 14698 if (any_ms) { 14699 ret = intel_modeset_checks(state); 14700 if (ret) 14701 goto fail; 14702 } else { 14703 state->cdclk.logical = dev_priv->cdclk.logical; 14704 } 14705 14706 ret = intel_atomic_check_crtcs(state); 14707 if (ret) 14708 goto fail; 14709 14710 intel_fbc_choose_crtc(dev_priv, state); 14711 ret = calc_watermark_data(state); 14712 if (ret) 14713 goto fail; 14714 14715 ret = intel_bw_atomic_check(state); 14716 if (ret) 14717 goto fail; 14718 14719 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14720 new_crtc_state, i) { 14721 if (!needs_modeset(new_crtc_state) && 14722 !new_crtc_state->update_pipe) 14723 continue; 14724 14725 intel_dump_pipe_config(new_crtc_state, state, 14726 needs_modeset(new_crtc_state) ? 14727 "[modeset]" : "[fastset]"); 14728 } 14729 14730 return 0; 14731 14732 fail: 14733 if (ret == -EDEADLK) 14734 return ret; 14735 14736 /* 14737 * FIXME would probably be nice to know which crtc specifically 14738 * caused the failure, in cases where we can pinpoint it. 
14739 */ 14740 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14741 new_crtc_state, i) 14742 intel_dump_pipe_config(new_crtc_state, state, "[failed]"); 14743 14744 return ret; 14745 } 14746 14747 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 14748 { 14749 return drm_atomic_helper_prepare_planes(state->base.dev, 14750 &state->base); 14751 } 14752 14753 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 14754 { 14755 struct drm_device *dev = crtc->base.dev; 14756 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)]; 14757 14758 if (!vblank->max_vblank_count) 14759 return (u32)drm_crtc_accurate_vblank_count(&crtc->base); 14760 14761 return crtc->base.funcs->get_vblank_counter(&crtc->base); 14762 } 14763 14764 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 14765 struct intel_crtc_state *crtc_state) 14766 { 14767 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14768 14769 if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes) 14770 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 14771 14772 if (crtc_state->has_pch_encoder) { 14773 enum pipe pch_transcoder = 14774 intel_crtc_pch_transcoder(crtc); 14775 14776 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 14777 } 14778 } 14779 14780 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 14781 const struct intel_crtc_state *new_crtc_state) 14782 { 14783 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 14784 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14785 14786 /* 14787 * Update pipe size and adjust fitter if needed: the reason for this is 14788 * that in compute_mode_changes we check the native mode (not the pfit 14789 * mode) to see if we can flip rather than do a full mode set. In the 14790 * fastboot case, we'll flip, but if we don't update the pipesrc and 14791 * pfit state, we'll end up with a big fb scanned out into the wrong 14792 * sized surface. 14793 */ 14794 intel_set_pipe_src_size(new_crtc_state); 14795 14796 /* on skylake this is done by detaching scalers */ 14797 if (INTEL_GEN(dev_priv) >= 9) { 14798 skl_detach_scalers(new_crtc_state); 14799 14800 if (new_crtc_state->pch_pfit.enabled) 14801 skl_pfit_enable(new_crtc_state); 14802 } else if (HAS_PCH_SPLIT(dev_priv)) { 14803 if (new_crtc_state->pch_pfit.enabled) 14804 ilk_pfit_enable(new_crtc_state); 14805 else if (old_crtc_state->pch_pfit.enabled) 14806 ilk_pfit_disable(old_crtc_state); 14807 } 14808 14809 if (INTEL_GEN(dev_priv) >= 11) 14810 icl_set_pipe_chicken(crtc); 14811 } 14812 14813 static void commit_pipe_config(struct intel_atomic_state *state, 14814 struct intel_crtc_state *old_crtc_state, 14815 struct intel_crtc_state *new_crtc_state) 14816 { 14817 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 14818 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14819 bool modeset = needs_modeset(new_crtc_state); 14820 14821 /* 14822 * During modesets pipe configuration was programmed as the 14823 * CRTC was enabled. 
14824 */ 14825 if (!modeset) { 14826 if (new_crtc_state->uapi.color_mgmt_changed || 14827 new_crtc_state->update_pipe) 14828 intel_color_commit(new_crtc_state); 14829 14830 if (INTEL_GEN(dev_priv) >= 9) 14831 skl_detach_scalers(new_crtc_state); 14832 14833 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 14834 bdw_set_pipemisc(new_crtc_state); 14835 14836 if (new_crtc_state->update_pipe) 14837 intel_pipe_fastset(old_crtc_state, new_crtc_state); 14838 } 14839 14840 if (dev_priv->display.atomic_update_watermarks) 14841 dev_priv->display.atomic_update_watermarks(state, crtc); 14842 } 14843 14844 static void intel_update_crtc(struct intel_crtc *crtc, 14845 struct intel_atomic_state *state, 14846 struct intel_crtc_state *old_crtc_state, 14847 struct intel_crtc_state *new_crtc_state) 14848 { 14849 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14850 bool modeset = needs_modeset(new_crtc_state); 14851 struct intel_plane_state *new_plane_state = 14852 intel_atomic_get_new_plane_state(state, 14853 to_intel_plane(crtc->base.primary)); 14854 14855 if (modeset) { 14856 intel_crtc_update_active_timings(new_crtc_state); 14857 14858 dev_priv->display.crtc_enable(state, crtc); 14859 14860 /* vblanks work again, re-enable pipe CRC. */ 14861 intel_crtc_enable_pipe_crc(crtc); 14862 } else { 14863 if (new_crtc_state->preload_luts && 14864 (new_crtc_state->uapi.color_mgmt_changed || 14865 new_crtc_state->update_pipe)) 14866 intel_color_load_luts(new_crtc_state); 14867 14868 intel_pre_plane_update(state, crtc); 14869 14870 if (new_crtc_state->update_pipe) 14871 intel_encoders_update_pipe(state, crtc); 14872 } 14873 14874 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) 14875 intel_fbc_disable(crtc); 14876 else if (new_plane_state) 14877 intel_fbc_enable(crtc, new_crtc_state, new_plane_state); 14878 14879 /* Perform vblank evasion around commit operation */ 14880 intel_pipe_update_start(new_crtc_state); 14881 14882 commit_pipe_config(state, old_crtc_state, new_crtc_state); 14883 14884 if (INTEL_GEN(dev_priv) >= 9) 14885 skl_update_planes_on_crtc(state, crtc); 14886 else 14887 i9xx_update_planes_on_crtc(state, crtc); 14888 14889 intel_pipe_update_end(new_crtc_state); 14890 14891 /* 14892 * We usually enable FIFO underrun interrupts as part of the 14893 * CRTC enable sequence during modesets. But when we inherit a 14894 * valid pipe configuration from the BIOS we need to take care 14895 * of enabling them on the CRTC's first fastset. 
14896 */ 14897 if (new_crtc_state->update_pipe && !modeset && 14898 old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) 14899 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 14900 } 14901 14902 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state) 14903 { 14904 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev); 14905 enum transcoder slave_transcoder; 14906 14907 WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask)); 14908 14909 slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1; 14910 return intel_get_crtc_for_pipe(dev_priv, 14911 (enum pipe)slave_transcoder); 14912 } 14913 14914 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 14915 struct intel_crtc_state *old_crtc_state, 14916 struct intel_crtc_state *new_crtc_state, 14917 struct intel_crtc *crtc) 14918 { 14919 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14920 14921 intel_crtc_disable_planes(state, crtc); 14922 14923 /* 14924 * We need to disable pipe CRC before disabling the pipe, 14925 * or we race against vblank off. 14926 */ 14927 intel_crtc_disable_pipe_crc(crtc); 14928 14929 dev_priv->display.crtc_disable(state, crtc); 14930 crtc->active = false; 14931 intel_fbc_disable(crtc); 14932 intel_disable_shared_dpll(old_crtc_state); 14933 14934 /* FIXME unify this for all platforms */ 14935 if (!new_crtc_state->hw.active && 14936 !HAS_GMCH(dev_priv) && 14937 dev_priv->display.initial_watermarks) 14938 dev_priv->display.initial_watermarks(state, crtc); 14939 } 14940 14941 static void intel_commit_modeset_disables(struct intel_atomic_state *state) 14942 { 14943 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 14944 struct intel_crtc *crtc; 14945 u32 handled = 0; 14946 int i; 14947 14948 /* Only disable port sync and MST slaves */ 14949 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14950 new_crtc_state, i) { 14951 if (!needs_modeset(new_crtc_state)) 14952 continue; 14953 14954 if (!old_crtc_state->hw.active) 14955 continue; 14956 14957 /* In case of Transcoder port Sync master slave CRTCs can be 14958 * assigned in any order and we need to make sure that 14959 * slave CRTCs are disabled first and then master CRTC since 14960 * Slave vblanks are masked till Master Vblanks. 
14961 */ 14962 if (!is_trans_port_sync_slave(old_crtc_state) && 14963 !intel_dp_mst_is_slave_trans(old_crtc_state)) 14964 continue; 14965 14966 intel_pre_plane_update(state, crtc); 14967 intel_old_crtc_state_disables(state, old_crtc_state, 14968 new_crtc_state, crtc); 14969 handled |= BIT(crtc->pipe); 14970 } 14971 14972 /* Disable everything else left on */ 14973 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14974 new_crtc_state, i) { 14975 if (!needs_modeset(new_crtc_state) || 14976 (handled & BIT(crtc->pipe))) 14977 continue; 14978 14979 intel_pre_plane_update(state, crtc); 14980 if (old_crtc_state->hw.active) 14981 intel_old_crtc_state_disables(state, old_crtc_state, 14982 new_crtc_state, crtc); 14983 } 14984 } 14985 14986 static void intel_commit_modeset_enables(struct intel_atomic_state *state) 14987 { 14988 struct intel_crtc *crtc; 14989 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14990 int i; 14991 14992 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 14993 if (!new_crtc_state->hw.active) 14994 continue; 14995 14996 intel_update_crtc(crtc, state, old_crtc_state, 14997 new_crtc_state); 14998 } 14999 } 15000 15001 static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc, 15002 struct intel_atomic_state *state, 15003 struct intel_crtc_state *new_crtc_state) 15004 { 15005 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15006 15007 intel_crtc_update_active_timings(new_crtc_state); 15008 dev_priv->display.crtc_enable(state, crtc); 15009 intel_crtc_enable_pipe_crc(crtc); 15010 } 15011 15012 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc, 15013 struct intel_atomic_state *state) 15014 { 15015 struct drm_connector *uninitialized_var(conn); 15016 struct drm_connector_state *conn_state; 15017 struct intel_dp *intel_dp; 15018 int i; 15019 15020 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 15021 if (conn_state->crtc == &crtc->base) 15022 break; 15023 } 15024 intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn))); 15025 intel_dp_stop_link_train(intel_dp); 15026 } 15027 15028 /* 15029 * TODO: This is only called from port sync and it is identical to what will be 15030 * executed again in intel_update_crtc() over port sync pipes 15031 */ 15032 static void intel_post_crtc_enable_updates(struct intel_crtc *crtc, 15033 struct intel_atomic_state *state) 15034 { 15035 struct intel_crtc_state *new_crtc_state = 15036 intel_atomic_get_new_crtc_state(state, crtc); 15037 struct intel_crtc_state *old_crtc_state = 15038 intel_atomic_get_old_crtc_state(state, crtc); 15039 struct intel_plane_state *new_plane_state = 15040 intel_atomic_get_new_plane_state(state, 15041 to_intel_plane(crtc->base.primary)); 15042 bool modeset = needs_modeset(new_crtc_state); 15043 15044 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) 15045 intel_fbc_disable(crtc); 15046 else if (new_plane_state) 15047 intel_fbc_enable(crtc, new_crtc_state, new_plane_state); 15048 15049 /* Perform vblank evasion around commit operation */ 15050 intel_pipe_update_start(new_crtc_state); 15051 commit_pipe_config(state, old_crtc_state, new_crtc_state); 15052 skl_update_planes_on_crtc(state, crtc); 15053 intel_pipe_update_end(new_crtc_state); 15054 15055 /* 15056 * We usually enable FIFO underrun interrupts as part of the 15057 * CRTC enable sequence during modesets. 
But when we inherit a 15058 * valid pipe configuration from the BIOS we need to take care 15059 * of enabling them on the CRTC's first fastset. 15060 */ 15061 if (new_crtc_state->update_pipe && !modeset && 15062 old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) 15063 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 15064 } 15065 15066 static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc, 15067 struct intel_atomic_state *state, 15068 struct intel_crtc_state *old_crtc_state, 15069 struct intel_crtc_state *new_crtc_state) 15070 { 15071 struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state); 15072 struct intel_crtc_state *new_slave_crtc_state = 15073 intel_atomic_get_new_crtc_state(state, slave_crtc); 15074 struct intel_crtc_state *old_slave_crtc_state = 15075 intel_atomic_get_old_crtc_state(state, slave_crtc); 15076 15077 WARN_ON(!slave_crtc || !new_slave_crtc_state || 15078 !old_slave_crtc_state); 15079 15080 DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n", 15081 crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id, 15082 slave_crtc->base.name); 15083 15084 /* Enable seq for slave with DP_TP_CTL left Idle until the 15085 * master is ready 15086 */ 15087 intel_crtc_enable_trans_port_sync(slave_crtc, 15088 state, 15089 new_slave_crtc_state); 15090 15091 /* Enable seq for master with DP_TP_CTL left Idle */ 15092 intel_crtc_enable_trans_port_sync(crtc, 15093 state, 15094 new_crtc_state); 15095 15096 /* Set Slave's DP_TP_CTL to Normal */ 15097 intel_set_dp_tp_ctl_normal(slave_crtc, 15098 state); 15099 15100 /* Set Master's DP_TP_CTL To Normal */ 15101 usleep_range(200, 400); 15102 intel_set_dp_tp_ctl_normal(crtc, 15103 state); 15104 15105 /* Now do the post crtc enable for the master and all slaves */ 15106 intel_post_crtc_enable_updates(slave_crtc, 15107 state); 15108 intel_post_crtc_enable_updates(crtc, 15109 state); 15110 } 15111 15112 static void skl_commit_modeset_enables(struct intel_atomic_state *state) 15113 { 15114 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15115 struct intel_crtc *crtc; 15116 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 15117 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 15118 u8 required_slices = state->wm_results.ddb.enabled_slices; 15119 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 15120 const u8 num_pipes = INTEL_NUM_PIPES(dev_priv); 15121 u8 update_pipes = 0, modeset_pipes = 0; 15122 int i; 15123 15124 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 15125 if (!new_crtc_state->hw.active) 15126 continue; 15127 15128 /* ignore allocations for crtcs that have been turned off. */ 15129 if (!needs_modeset(new_crtc_state)) { 15130 entries[i] = old_crtc_state->wm.skl.ddb; 15131 update_pipes |= BIT(crtc->pipe); 15132 } else { 15133 modeset_pipes |= BIT(crtc->pipe); 15134 } 15135 } 15136 15137 /* If the 2nd DBuf slice is required, enable it here */ 15138 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) 15139 icl_dbuf_slices_update(dev_priv, required_slices); 15140 15141 /* 15142 * Whenever the number of active pipes changes, we need to make sure we 15143 * update the pipes in the right order so that their ddb allocations 15144 * never overlap with each other between CRTC updates. Otherwise we'll 15145 * cause pipe underruns and other bad stuff.
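*
* The three passes below handle this: fastsets first, then modesets
* without external dependencies, then modesets that depend on other
* pipes (MST slaves).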
15146 * 15147 * So first lets enable all pipes that do not need a fullmodeset as 15148 * those don't have any external dependency. 15149 */ 15150 while (update_pipes) { 15151 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15152 new_crtc_state, i) { 15153 enum pipe pipe = crtc->pipe; 15154 15155 if ((update_pipes & BIT(pipe)) == 0) 15156 continue; 15157 15158 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 15159 entries, num_pipes, i)) 15160 continue; 15161 15162 entries[i] = new_crtc_state->wm.skl.ddb; 15163 update_pipes &= ~BIT(pipe); 15164 15165 intel_update_crtc(crtc, state, old_crtc_state, 15166 new_crtc_state); 15167 15168 /* 15169 * If this is an already active pipe, it's DDB changed, 15170 * and this isn't the last pipe that needs updating 15171 * then we need to wait for a vblank to pass for the 15172 * new ddb allocation to take effect. 15173 */ 15174 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 15175 &old_crtc_state->wm.skl.ddb) && 15176 (update_pipes | modeset_pipes)) 15177 intel_wait_for_vblank(dev_priv, pipe); 15178 } 15179 } 15180 15181 /* 15182 * Enable all pipes that needs a modeset and do not depends on other 15183 * pipes 15184 */ 15185 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15186 new_crtc_state, i) { 15187 enum pipe pipe = crtc->pipe; 15188 15189 if ((modeset_pipes & BIT(pipe)) == 0) 15190 continue; 15191 15192 if (intel_dp_mst_is_slave_trans(new_crtc_state) || 15193 is_trans_port_sync_slave(new_crtc_state)) 15194 continue; 15195 15196 WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 15197 entries, num_pipes, i)); 15198 15199 entries[i] = new_crtc_state->wm.skl.ddb; 15200 modeset_pipes &= ~BIT(pipe); 15201 15202 if (is_trans_port_sync_mode(new_crtc_state)) { 15203 struct intel_crtc *slave_crtc; 15204 15205 intel_update_trans_port_sync_crtcs(crtc, state, 15206 old_crtc_state, 15207 new_crtc_state); 15208 15209 slave_crtc = intel_get_slave_crtc(new_crtc_state); 15210 /* TODO: update entries[] of slave */ 15211 modeset_pipes &= ~BIT(slave_crtc->pipe); 15212 15213 } else { 15214 intel_update_crtc(crtc, state, old_crtc_state, 15215 new_crtc_state); 15216 } 15217 } 15218 15219 /* 15220 * Finally enable all pipes that needs a modeset and depends on 15221 * other pipes, right now it is only MST slaves as both port sync slave 15222 * and master are enabled together 15223 */ 15224 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15225 new_crtc_state, i) { 15226 enum pipe pipe = crtc->pipe; 15227 15228 if ((modeset_pipes & BIT(pipe)) == 0) 15229 continue; 15230 15231 WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 15232 entries, num_pipes, i)); 15233 15234 entries[i] = new_crtc_state->wm.skl.ddb; 15235 modeset_pipes &= ~BIT(pipe); 15236 15237 intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state); 15238 } 15239 15240 WARN_ON(modeset_pipes); 15241 15242 /* If 2nd DBuf slice is no more required disable it */ 15243 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices) 15244 icl_dbuf_slices_update(dev_priv, required_slices); 15245 } 15246 15247 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 15248 { 15249 struct intel_atomic_state *state, *next; 15250 struct llist_node *freed; 15251 15252 freed = llist_del_all(&dev_priv->atomic_helper.free_list); 15253 llist_for_each_entry_safe(state, next, freed, freed) 15254 drm_atomic_state_put(&state->base); 15255 } 15256 15257 static void 
intel_atomic_helper_free_state_worker(struct work_struct *work) 15258 { 15259 struct drm_i915_private *dev_priv = 15260 container_of(work, typeof(*dev_priv), atomic_helper.free_work); 15261 15262 intel_atomic_helper_free_state(dev_priv); 15263 } 15264 15265 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 15266 { 15267 struct wait_queue_entry wait_fence, wait_reset; 15268 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); 15269 15270 init_wait_entry(&wait_fence, 0); 15271 init_wait_entry(&wait_reset, 0); 15272 for (;;) { 15273 prepare_to_wait(&intel_state->commit_ready.wait, 15274 &wait_fence, TASK_UNINTERRUPTIBLE); 15275 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 15276 I915_RESET_MODESET), 15277 &wait_reset, TASK_UNINTERRUPTIBLE); 15278 15279 15280 if (i915_sw_fence_done(&intel_state->commit_ready) || 15281 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 15282 break; 15283 15284 schedule(); 15285 } 15286 finish_wait(&intel_state->commit_ready.wait, &wait_fence); 15287 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 15288 I915_RESET_MODESET), 15289 &wait_reset); 15290 } 15291 15292 static void intel_atomic_cleanup_work(struct work_struct *work) 15293 { 15294 struct drm_atomic_state *state = 15295 container_of(work, struct drm_atomic_state, commit_work); 15296 struct drm_i915_private *i915 = to_i915(state->dev); 15297 15298 drm_atomic_helper_cleanup_planes(&i915->drm, state); 15299 drm_atomic_helper_commit_cleanup_done(state); 15300 drm_atomic_state_put(state); 15301 15302 intel_atomic_helper_free_state(i915); 15303 } 15304 15305 static void intel_atomic_commit_tail(struct intel_atomic_state *state) 15306 { 15307 struct drm_device *dev = state->base.dev; 15308 struct drm_i915_private *dev_priv = to_i915(dev); 15309 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 15310 struct intel_crtc *crtc; 15311 u64 put_domains[I915_MAX_PIPES] = {}; 15312 intel_wakeref_t wakeref = 0; 15313 int i; 15314 15315 intel_atomic_commit_fence_wait(state); 15316 15317 drm_atomic_helper_wait_for_dependencies(&state->base); 15318 15319 if (state->modeset) 15320 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 15321 15322 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15323 new_crtc_state, i) { 15324 if (needs_modeset(new_crtc_state) || 15325 new_crtc_state->update_pipe) { 15326 15327 put_domains[crtc->pipe] = 15328 modeset_get_crtc_power_domains(new_crtc_state); 15329 } 15330 } 15331 15332 intel_commit_modeset_disables(state); 15333 15334 /* FIXME: Eventually get rid of our crtc->config pointer */ 15335 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 15336 crtc->config = new_crtc_state; 15337 15338 if (state->modeset) { 15339 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); 15340 15341 intel_set_cdclk_pre_plane_update(dev_priv, 15342 &state->cdclk.actual, 15343 &dev_priv->cdclk.actual, 15344 state->cdclk.pipe); 15345 15346 /* 15347 * SKL workaround: bspec recommends we disable the SAGV when we 15348 * have more than one pipe enabled 15349 */ 15350 if (!intel_can_enable_sagv(state)) 15351 intel_disable_sagv(dev_priv); 15352 15353 intel_modeset_verify_disabled(dev_priv, state); 15354 } 15355 15356 /* Complete the events for pipes that have now been disabled */ 15357 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 15358 bool modeset = needs_modeset(new_crtc_state); 15359 15360 /* Complete the event manually for pipes that end up disabled here
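* (a disabled pipe generates no vblank interrupt to deliver it)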
*/ 15361 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) { 15362 spin_lock_irq(&dev->event_lock); 15363 drm_crtc_send_vblank_event(&crtc->base, 15364 new_crtc_state->uapi.event); 15365 spin_unlock_irq(&dev->event_lock); 15366 15367 new_crtc_state->uapi.event = NULL; 15368 } 15369 } 15370 15371 if (state->modeset) 15372 intel_encoders_update_prepare(state); 15373 15374 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 15375 dev_priv->display.commit_modeset_enables(state); 15376 15377 if (state->modeset) { 15378 intel_encoders_update_complete(state); 15379 15380 intel_set_cdclk_post_plane_update(dev_priv, 15381 &state->cdclk.actual, 15382 &dev_priv->cdclk.actual, 15383 state->cdclk.pipe); 15384 } 15385 15386 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 15387 * already, but still need the state for the delayed optimization. To 15388 * fix this: 15389 * - wrap the optimization/post_plane_update stuff into a per-crtc work. 15390 * - schedule that vblank worker _before_ calling hw_done 15391 * - at the start of commit_tail, cancel it _synchronously_ 15392 * - switch over to the vblank wait helper in the core after that since 15393 * we don't need our special handling any more. 15394 */ 15395 drm_atomic_helper_wait_for_flip_done(dev, &state->base); 15396 15397 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 15398 if (new_crtc_state->hw.active && 15399 !needs_modeset(new_crtc_state) && 15400 !new_crtc_state->preload_luts && 15401 (new_crtc_state->uapi.color_mgmt_changed || 15402 new_crtc_state->update_pipe)) 15403 intel_color_load_luts(new_crtc_state); 15404 } 15405 15406 /* 15407 * Now that the vblank has passed, we can go ahead and program the 15408 * optimal watermarks on platforms that need two-step watermark 15409 * programming. 15410 * 15411 * TODO: Move this (and other cleanup) to an async worker eventually. 15412 */ 15413 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15414 new_crtc_state, i) { 15415 /* 15416 * Gen2 reports pipe underruns whenever all planes are disabled. 15417 * So re-enable underrun reporting after some planes get enabled. 15418 * 15419 * We do this before .optimize_watermarks() so that we have a 15420 * chance of catching underruns with the intermediate watermarks 15421 * vs. the new plane configuration. 15422 */ 15423 if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state)) 15424 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 15425 15426 if (dev_priv->display.optimize_watermarks) 15427 dev_priv->display.optimize_watermarks(state, crtc); 15428 } 15429 15430 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 15431 intel_post_plane_update(state, crtc); 15432 15433 if (put_domains[i]) 15434 modeset_put_power_domains(dev_priv, put_domains[i]); 15435 15436 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); 15437 } 15438 15439 /* Underruns don't always raise interrupts, so check manually */ 15440 intel_check_cpu_fifo_underruns(dev_priv); 15441 intel_check_pch_fifo_underruns(dev_priv); 15442 15443 if (state->modeset) 15444 intel_verify_planes(state); 15445 15446 if (state->modeset && intel_can_enable_sagv(state)) 15447 intel_enable_sagv(dev_priv); 15448 15449 drm_atomic_helper_commit_hw_done(&state->base); 15450 15451 if (state->modeset) { 15452 /* As one of the primary mmio accessors, KMS has a high 15453 * likelihood of triggering bugs in unclaimed access.
After we 15454 * finish modesetting, see if an error has been flagged, and if 15455 * so enable debugging for the next modeset - and hope we catch 15456 * the culprit. 15457 */ 15458 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 15459 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); 15460 } 15461 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 15462 15463 /* 15464 * Defer the cleanup of the old state to a separate worker to not 15465 * impede the current task (userspace for blocking modesets) that 15466 * are executed inline. For out-of-line asynchronous modesets/flips, 15467 * deferring to a new worker seems overkill, but we would place a 15468 * schedule point (cond_resched()) here anyway to keep latencies 15469 * down. 15470 */ 15471 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 15472 queue_work(system_highpri_wq, &state->base.commit_work); 15473 } 15474 15475 static void intel_atomic_commit_work(struct work_struct *work) 15476 { 15477 struct intel_atomic_state *state = 15478 container_of(work, struct intel_atomic_state, base.commit_work); 15479 15480 intel_atomic_commit_tail(state); 15481 } 15482 15483 static int __i915_sw_fence_call 15484 intel_atomic_commit_ready(struct i915_sw_fence *fence, 15485 enum i915_sw_fence_notify notify) 15486 { 15487 struct intel_atomic_state *state = 15488 container_of(fence, struct intel_atomic_state, commit_ready); 15489 15490 switch (notify) { 15491 case FENCE_COMPLETE: 15492 /* we do blocking waits in the worker, nothing to do here */ 15493 break; 15494 case FENCE_FREE: 15495 { 15496 struct intel_atomic_helper *helper = 15497 &to_i915(state->base.dev)->atomic_helper; 15498 15499 if (llist_add(&state->freed, &helper->free_list)) 15500 schedule_work(&helper->free_work); 15501 break; 15502 } 15503 } 15504 15505 return NOTIFY_DONE; 15506 } 15507 15508 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 15509 { 15510 struct intel_plane_state *old_plane_state, *new_plane_state; 15511 struct intel_plane *plane; 15512 int i; 15513 15514 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 15515 new_plane_state, i) 15516 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 15517 to_intel_frontbuffer(new_plane_state->hw.fb), 15518 plane->frontbuffer_bit); 15519 } 15520 15521 static void assert_global_state_locked(struct drm_i915_private *dev_priv) 15522 { 15523 struct intel_crtc *crtc; 15524 15525 for_each_intel_crtc(&dev_priv->drm, crtc) 15526 drm_modeset_lock_assert_held(&crtc->base.mutex); 15527 } 15528 15529 static int intel_atomic_commit(struct drm_device *dev, 15530 struct drm_atomic_state *_state, 15531 bool nonblock) 15532 { 15533 struct intel_atomic_state *state = to_intel_atomic_state(_state); 15534 struct drm_i915_private *dev_priv = to_i915(dev); 15535 int ret = 0; 15536 15537 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 15538 15539 drm_atomic_state_get(&state->base); 15540 i915_sw_fence_init(&state->commit_ready, 15541 intel_atomic_commit_ready); 15542 15543 /* 15544 * The intel_legacy_cursor_update() fast path takes care 15545 * of avoiding the vblank waits for simple cursor 15546 * movement and flips. For cursor on/off and size changes, 15547 * we want to perform the vblank waits so that watermark 15548 * updates happen during the correct frames. Gen9+ have 15549 * double buffered watermarks and so shouldn't need this. 
15550 * 15551 * Unset state->legacy_cursor_update before the call to 15552 * drm_atomic_helper_setup_commit() because otherwise 15553 * drm_atomic_helper_wait_for_flip_done() is a noop and 15554 * we get FIFO underruns because we didn't wait 15555 * for vblank. 15556 * 15557 * FIXME doing watermarks and fb cleanup from a vblank worker 15558 * (assuming we had any) would solve these problems. 15559 */ 15560 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) { 15561 struct intel_crtc_state *new_crtc_state; 15562 struct intel_crtc *crtc; 15563 int i; 15564 15565 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 15566 if (new_crtc_state->wm.need_postvbl_update || 15567 new_crtc_state->update_wm_post) 15568 state->base.legacy_cursor_update = false; 15569 } 15570 15571 ret = intel_atomic_prepare_commit(state); 15572 if (ret) { 15573 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); 15574 i915_sw_fence_commit(&state->commit_ready); 15575 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 15576 return ret; 15577 } 15578 15579 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 15580 if (!ret) 15581 ret = drm_atomic_helper_swap_state(&state->base, true); 15582 15583 if (ret) { 15584 i915_sw_fence_commit(&state->commit_ready); 15585 15586 drm_atomic_helper_cleanup_planes(dev, &state->base); 15587 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 15588 return ret; 15589 } 15590 dev_priv->wm.distrust_bios_wm = false; 15591 intel_shared_dpll_swap_state(state); 15592 intel_atomic_track_fbs(state); 15593 15594 if (state->global_state_changed) { 15595 assert_global_state_locked(dev_priv); 15596 15597 memcpy(dev_priv->min_cdclk, state->min_cdclk, 15598 sizeof(state->min_cdclk)); 15599 memcpy(dev_priv->min_voltage_level, state->min_voltage_level, 15600 sizeof(state->min_voltage_level)); 15601 dev_priv->active_pipes = state->active_pipes; 15602 dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; 15603 15604 intel_cdclk_swap_state(state); 15605 } 15606 15607 drm_atomic_state_get(&state->base); 15608 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 15609 15610 i915_sw_fence_commit(&state->commit_ready); 15611 if (nonblock && state->modeset) { 15612 queue_work(dev_priv->modeset_wq, &state->base.commit_work); 15613 } else if (nonblock) { 15614 queue_work(dev_priv->flip_wq, &state->base.commit_work); 15615 } else { 15616 if (state->modeset) 15617 flush_workqueue(dev_priv->modeset_wq); 15618 intel_atomic_commit_tail(state); 15619 } 15620 15621 return 0; 15622 } 15623 15624 struct wait_rps_boost { 15625 struct wait_queue_entry wait; 15626 15627 struct drm_crtc *crtc; 15628 struct i915_request *request; 15629 }; 15630 15631 static int do_rps_boost(struct wait_queue_entry *_wait, 15632 unsigned mode, int sync, void *key) 15633 { 15634 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 15635 struct i915_request *rq = wait->request; 15636 15637 /* 15638 * If we missed the vblank, but the request is already running it 15639 * is reasonable to assume that it will complete before the next 15640 * vblank without our intervention, so leave RPS alone. 
15641 */ 15642 if (!i915_request_started(rq)) 15643 intel_rps_boost(rq); 15644 i915_request_put(rq); 15645 15646 drm_crtc_vblank_put(wait->crtc); 15647 15648 list_del(&wait->wait.entry); 15649 kfree(wait); 15650 return 1; 15651 } 15652 15653 static void add_rps_boost_after_vblank(struct drm_crtc *crtc, 15654 struct dma_fence *fence) 15655 { 15656 struct wait_rps_boost *wait; 15657 15658 if (!dma_fence_is_i915(fence)) 15659 return; 15660 15661 if (INTEL_GEN(to_i915(crtc->dev)) < 6) 15662 return; 15663 15664 if (drm_crtc_vblank_get(crtc)) 15665 return; 15666 15667 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 15668 if (!wait) { 15669 drm_crtc_vblank_put(crtc); 15670 return; 15671 } 15672 15673 wait->request = to_request(dma_fence_get(fence)); 15674 wait->crtc = crtc; 15675 15676 wait->wait.func = do_rps_boost; 15677 wait->wait.flags = 0; 15678 15679 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 15680 } 15681 15682 static int intel_plane_pin_fb(struct intel_plane_state *plane_state) 15683 { 15684 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 15685 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 15686 struct drm_framebuffer *fb = plane_state->hw.fb; 15687 struct i915_vma *vma; 15688 15689 if (plane->id == PLANE_CURSOR && 15690 INTEL_INFO(dev_priv)->display.cursor_needs_physical) { 15691 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15692 const int align = intel_cursor_alignment(dev_priv); 15693 int err; 15694 15695 err = i915_gem_object_attach_phys(obj, align); 15696 if (err) 15697 return err; 15698 } 15699 15700 vma = intel_pin_and_fence_fb_obj(fb, 15701 &plane_state->view, 15702 intel_plane_uses_fence(plane_state), 15703 &plane_state->flags); 15704 if (IS_ERR(vma)) 15705 return PTR_ERR(vma); 15706 15707 plane_state->vma = vma; 15708 15709 return 0; 15710 } 15711 15712 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 15713 { 15714 struct i915_vma *vma; 15715 15716 vma = fetch_and_zero(&old_plane_state->vma); 15717 if (vma) 15718 intel_unpin_fb_vma(vma, old_plane_state->flags); 15719 } 15720 15721 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) 15722 { 15723 struct i915_sched_attr attr = { 15724 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY), 15725 }; 15726 15727 i915_gem_object_wait_priority(obj, 0, &attr); 15728 } 15729 15730 /** 15731 * intel_prepare_plane_fb - Prepare fb for usage on plane 15732 * @plane: drm plane to prepare for 15733 * @_new_plane_state: the plane state being prepared 15734 * 15735 * Prepares a framebuffer for usage on a display plane. Generally this 15736 * involves pinning the underlying object and updating the frontbuffer tracking 15737 * bits. Some older platforms need special physical address handling for 15738 * cursor planes. 15739 * 15740 * Returns 0 on success, negative error code on failure. 
15741 */ 15742 int 15743 intel_prepare_plane_fb(struct drm_plane *plane, 15744 struct drm_plane_state *_new_plane_state) 15745 { 15746 struct intel_plane_state *new_plane_state = 15747 to_intel_plane_state(_new_plane_state); 15748 struct intel_atomic_state *intel_state = 15749 to_intel_atomic_state(new_plane_state->uapi.state); 15750 struct drm_i915_private *dev_priv = to_i915(plane->dev); 15751 struct drm_framebuffer *fb = new_plane_state->hw.fb; 15752 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15753 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); 15754 int ret; 15755 15756 if (old_obj) { 15757 struct intel_crtc_state *crtc_state = 15758 intel_atomic_get_new_crtc_state(intel_state, 15759 to_intel_crtc(plane->state->crtc)); 15760 15761 /* Big Hammer, we also need to ensure that any pending 15762 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 15763 * current scanout is retired before unpinning the old 15764 * framebuffer. Note that we rely on userspace rendering 15765 * into the buffer attached to the pipe they are waiting 15766 * on. If not, userspace generates a GPU hang with IPEHR 15767 * point to the MI_WAIT_FOR_EVENT. 15768 * 15769 * This should only fail upon a hung GPU, in which case we 15770 * can safely continue. 15771 */ 15772 if (needs_modeset(crtc_state)) { 15773 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 15774 old_obj->base.resv, NULL, 15775 false, 0, 15776 GFP_KERNEL); 15777 if (ret < 0) 15778 return ret; 15779 } 15780 } 15781 15782 if (new_plane_state->uapi.fence) { /* explicit fencing */ 15783 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, 15784 new_plane_state->uapi.fence, 15785 I915_FENCE_TIMEOUT, 15786 GFP_KERNEL); 15787 if (ret < 0) 15788 return ret; 15789 } 15790 15791 if (!obj) 15792 return 0; 15793 15794 ret = i915_gem_object_pin_pages(obj); 15795 if (ret) 15796 return ret; 15797 15798 ret = intel_plane_pin_fb(new_plane_state); 15799 15800 i915_gem_object_unpin_pages(obj); 15801 if (ret) 15802 return ret; 15803 15804 fb_obj_bump_render_priority(obj); 15805 i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB); 15806 15807 if (!new_plane_state->uapi.fence) { /* implicit fencing */ 15808 struct dma_fence *fence; 15809 15810 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 15811 obj->base.resv, NULL, 15812 false, I915_FENCE_TIMEOUT, 15813 GFP_KERNEL); 15814 if (ret < 0) 15815 return ret; 15816 15817 fence = dma_resv_get_excl_rcu(obj->base.resv); 15818 if (fence) { 15819 add_rps_boost_after_vblank(new_plane_state->hw.crtc, 15820 fence); 15821 dma_fence_put(fence); 15822 } 15823 } else { 15824 add_rps_boost_after_vblank(new_plane_state->hw.crtc, 15825 new_plane_state->uapi.fence); 15826 } 15827 15828 /* 15829 * We declare pageflips to be interactive and so merit a small bias 15830 * towards upclocking to deliver the frame on time. By only changing 15831 * the RPS thresholds to sample more regularly and aim for higher 15832 * clocks we can hopefully deliver low power workloads (like kodi) 15833 * that are not quite steady state without resorting to forcing 15834 * maximum clocks following a vblank miss (see do_rps_boost()). 
15835 */ 15836 if (!intel_state->rps_interactive) { 15837 intel_rps_mark_interactive(&dev_priv->gt.rps, true); 15838 intel_state->rps_interactive = true; 15839 } 15840 15841 return 0; 15842 } 15843 15844 /** 15845 * intel_cleanup_plane_fb - Cleans up an fb after plane use 15846 * @plane: drm plane to clean up for 15847 * @_old_plane_state: the state from the previous modeset 15848 * 15849 * Cleans up a framebuffer that has just been removed from a plane. 15850 */ 15851 void 15852 intel_cleanup_plane_fb(struct drm_plane *plane, 15853 struct drm_plane_state *_old_plane_state) 15854 { 15855 struct intel_plane_state *old_plane_state = 15856 to_intel_plane_state(_old_plane_state); 15857 struct intel_atomic_state *intel_state = 15858 to_intel_atomic_state(old_plane_state->uapi.state); 15859 struct drm_i915_private *dev_priv = to_i915(plane->dev); 15860 15861 if (intel_state->rps_interactive) { 15862 intel_rps_mark_interactive(&dev_priv->gt.rps, false); 15863 intel_state->rps_interactive = false; 15864 } 15865 15866 /* Should only be called after a successful intel_prepare_plane_fb()! */ 15867 intel_plane_unpin_fb(old_plane_state); 15868 } 15869 15870 /** 15871 * intel_plane_destroy - destroy a plane 15872 * @plane: plane to destroy 15873 * 15874 * Common destruction function for all types of planes (primary, cursor, 15875 * sprite). 15876 */ 15877 void intel_plane_destroy(struct drm_plane *plane) 15878 { 15879 drm_plane_cleanup(plane); 15880 kfree(to_intel_plane(plane)); 15881 } 15882 15883 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, 15884 u32 format, u64 modifier) 15885 { 15886 switch (modifier) { 15887 case DRM_FORMAT_MOD_LINEAR: 15888 case I915_FORMAT_MOD_X_TILED: 15889 break; 15890 default: 15891 return false; 15892 } 15893 15894 switch (format) { 15895 case DRM_FORMAT_C8: 15896 case DRM_FORMAT_RGB565: 15897 case DRM_FORMAT_XRGB1555: 15898 case DRM_FORMAT_XRGB8888: 15899 return modifier == DRM_FORMAT_MOD_LINEAR || 15900 modifier == I915_FORMAT_MOD_X_TILED; 15901 default: 15902 return false; 15903 } 15904 } 15905 15906 static bool i965_plane_format_mod_supported(struct drm_plane *_plane, 15907 u32 format, u64 modifier) 15908 { 15909 switch (modifier) { 15910 case DRM_FORMAT_MOD_LINEAR: 15911 case I915_FORMAT_MOD_X_TILED: 15912 break; 15913 default: 15914 return false; 15915 } 15916 15917 switch (format) { 15918 case DRM_FORMAT_C8: 15919 case DRM_FORMAT_RGB565: 15920 case DRM_FORMAT_XRGB8888: 15921 case DRM_FORMAT_XBGR8888: 15922 case DRM_FORMAT_ARGB8888: 15923 case DRM_FORMAT_ABGR8888: 15924 case DRM_FORMAT_XRGB2101010: 15925 case DRM_FORMAT_XBGR2101010: 15926 case DRM_FORMAT_ARGB2101010: 15927 case DRM_FORMAT_ABGR2101010: 15928 case DRM_FORMAT_XBGR16161616F: 15929 return modifier == DRM_FORMAT_MOD_LINEAR || 15930 modifier == I915_FORMAT_MOD_X_TILED; 15931 default: 15932 return false; 15933 } 15934 } 15935 15936 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, 15937 u32 format, u64 modifier) 15938 { 15939 return modifier == DRM_FORMAT_MOD_LINEAR && 15940 format == DRM_FORMAT_ARGB8888; 15941 } 15942 15943 static const struct drm_plane_funcs i965_plane_funcs = { 15944 .update_plane = drm_atomic_helper_update_plane, 15945 .disable_plane = drm_atomic_helper_disable_plane, 15946 .destroy = intel_plane_destroy, 15947 .atomic_duplicate_state = intel_plane_duplicate_state, 15948 .atomic_destroy_state = intel_plane_destroy_state, 15949 .format_mod_supported = i965_plane_format_mod_supported, 15950 }; 15951 15952 static const struct drm_plane_funcs 
i8xx_plane_funcs = { 15953 .update_plane = drm_atomic_helper_update_plane, 15954 .disable_plane = drm_atomic_helper_disable_plane, 15955 .destroy = intel_plane_destroy, 15956 .atomic_duplicate_state = intel_plane_duplicate_state, 15957 .atomic_destroy_state = intel_plane_destroy_state, 15958 .format_mod_supported = i8xx_plane_format_mod_supported, 15959 }; 15960 15961 static int 15962 intel_legacy_cursor_update(struct drm_plane *_plane, 15963 struct drm_crtc *_crtc, 15964 struct drm_framebuffer *fb, 15965 int crtc_x, int crtc_y, 15966 unsigned int crtc_w, unsigned int crtc_h, 15967 u32 src_x, u32 src_y, 15968 u32 src_w, u32 src_h, 15969 struct drm_modeset_acquire_ctx *ctx) 15970 { 15971 struct intel_plane *plane = to_intel_plane(_plane); 15972 struct intel_crtc *crtc = to_intel_crtc(_crtc); 15973 struct intel_plane_state *old_plane_state = 15974 to_intel_plane_state(plane->base.state); 15975 struct intel_plane_state *new_plane_state; 15976 struct intel_crtc_state *crtc_state = 15977 to_intel_crtc_state(crtc->base.state); 15978 struct intel_crtc_state *new_crtc_state; 15979 int ret; 15980 15981 /* 15982 * When crtc is inactive or there is a modeset pending, 15983 * wait for it to complete in the slowpath 15984 */ 15985 if (!crtc_state->hw.active || needs_modeset(crtc_state) || 15986 crtc_state->update_pipe) 15987 goto slow; 15988 15989 /* 15990 * Don't do an async update if there is an outstanding commit modifying 15991 * the plane. This prevents our async update's changes from getting 15992 * overridden by a previous synchronous update's state. 15993 */ 15994 if (old_plane_state->uapi.commit && 15995 !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done)) 15996 goto slow; 15997 15998 /* 15999 * If any parameters change that may affect watermarks, 16000 * take the slowpath. Only changing fb or position should be 16001 * in the fastpath. 
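* (which is why the checks below compare the crtc, the plane size and
* whether an fb is present, but not the crtc_x/crtc_y position)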
16002 */ 16003 if (old_plane_state->uapi.crtc != &crtc->base || 16004 old_plane_state->uapi.src_w != src_w || 16005 old_plane_state->uapi.src_h != src_h || 16006 old_plane_state->uapi.crtc_w != crtc_w || 16007 old_plane_state->uapi.crtc_h != crtc_h || 16008 !old_plane_state->uapi.fb != !fb) 16009 goto slow; 16010 16011 new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base)); 16012 if (!new_plane_state) 16013 return -ENOMEM; 16014 16015 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base)); 16016 if (!new_crtc_state) { 16017 ret = -ENOMEM; 16018 goto out_free; 16019 } 16020 16021 drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb); 16022 16023 new_plane_state->uapi.src_x = src_x; 16024 new_plane_state->uapi.src_y = src_y; 16025 new_plane_state->uapi.src_w = src_w; 16026 new_plane_state->uapi.src_h = src_h; 16027 new_plane_state->uapi.crtc_x = crtc_x; 16028 new_plane_state->uapi.crtc_y = crtc_y; 16029 new_plane_state->uapi.crtc_w = crtc_w; 16030 new_plane_state->uapi.crtc_h = crtc_h; 16031 16032 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, 16033 old_plane_state, new_plane_state); 16034 if (ret) 16035 goto out_free; 16036 16037 ret = intel_plane_pin_fb(new_plane_state); 16038 if (ret) 16039 goto out_free; 16040 16041 intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb), 16042 ORIGIN_FLIP); 16043 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 16044 to_intel_frontbuffer(new_plane_state->hw.fb), 16045 plane->frontbuffer_bit); 16046 16047 /* Swap plane state */ 16048 plane->base.state = &new_plane_state->uapi; 16049 16050 /* 16051 * We cannot swap crtc_state as it may be in use by an atomic commit or 16052 * page flip that's running simultaneously. If we swap crtc_state and 16053 * destroy the old state, we will cause a use-after-free there. 16054 * 16055 * Only update active_planes, which is needed for our internal 16056 * bookkeeping. Either value will do the right thing when updating 16057 * planes atomically. If the cursor was part of the atomic update then 16058 * we would have taken the slowpath. 
16059 */ 16060 crtc_state->active_planes = new_crtc_state->active_planes; 16061 16062 if (new_plane_state->uapi.visible) 16063 intel_update_plane(plane, crtc_state, new_plane_state); 16064 else 16065 intel_disable_plane(plane, crtc_state); 16066 16067 intel_plane_unpin_fb(old_plane_state); 16068 16069 out_free: 16070 if (new_crtc_state) 16071 intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi); 16072 if (ret) 16073 intel_plane_destroy_state(&plane->base, &new_plane_state->uapi); 16074 else 16075 intel_plane_destroy_state(&plane->base, &old_plane_state->uapi); 16076 return ret; 16077 16078 slow: 16079 return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb, 16080 crtc_x, crtc_y, crtc_w, crtc_h, 16081 src_x, src_y, src_w, src_h, ctx); 16082 } 16083 16084 static const struct drm_plane_funcs intel_cursor_plane_funcs = { 16085 .update_plane = intel_legacy_cursor_update, 16086 .disable_plane = drm_atomic_helper_disable_plane, 16087 .destroy = intel_plane_destroy, 16088 .atomic_duplicate_state = intel_plane_duplicate_state, 16089 .atomic_destroy_state = intel_plane_destroy_state, 16090 .format_mod_supported = intel_cursor_format_mod_supported, 16091 }; 16092 16093 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, 16094 enum i9xx_plane_id i9xx_plane) 16095 { 16096 if (!HAS_FBC(dev_priv)) 16097 return false; 16098 16099 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 16100 return i9xx_plane == PLANE_A; /* tied to pipe A */ 16101 else if (IS_IVYBRIDGE(dev_priv)) 16102 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B || 16103 i9xx_plane == PLANE_C; 16104 else if (INTEL_GEN(dev_priv) >= 4) 16105 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B; 16106 else 16107 return i9xx_plane == PLANE_A; 16108 } 16109 16110 static struct intel_plane * 16111 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) 16112 { 16113 struct intel_plane *plane; 16114 const struct drm_plane_funcs *plane_funcs; 16115 unsigned int supported_rotations; 16116 unsigned int possible_crtcs; 16117 const u32 *formats; 16118 int num_formats; 16119 int ret, zpos; 16120 16121 if (INTEL_GEN(dev_priv) >= 9) 16122 return skl_universal_plane_create(dev_priv, pipe, 16123 PLANE_PRIMARY); 16124 16125 plane = intel_plane_alloc(); 16126 if (IS_ERR(plane)) 16127 return plane; 16128 16129 plane->pipe = pipe; 16130 /* 16131 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 16132 * port is hooked to pipe B. Hence we want plane A feeding pipe B. 16133 */ 16134 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) 16135 plane->i9xx_plane = (enum i9xx_plane_id) !pipe; 16136 else 16137 plane->i9xx_plane = (enum i9xx_plane_id) pipe; 16138 plane->id = PLANE_PRIMARY; 16139 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); 16140 16141 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); 16142 if (plane->has_fbc) { 16143 struct intel_fbc *fbc = &dev_priv->fbc; 16144 16145 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; 16146 } 16147 16148 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 16149 formats = vlv_primary_formats; 16150 num_formats = ARRAY_SIZE(vlv_primary_formats); 16151 } else if (INTEL_GEN(dev_priv) >= 4) { 16152 /* 16153 * WaFP16GammaEnabling:ivb 16154 * "Workaround : When using the 64-bit format, the plane 16155 * output on each color channel has one quarter amplitude. 
16156 * It can be brought up to full amplitude by using pipe 16157 * gamma correction or pipe color space conversion to 16158 * multiply the plane output by four." 16159 * 16160 * There is no dedicated plane gamma for the primary plane, 16161 * and using the pipe gamma/csc could conflict with other 16162 * planes, so we choose not to expose fp16 on IVB primary 16163 * planes. HSW primary planes no longer have this problem. 16164 */ 16165 if (IS_IVYBRIDGE(dev_priv)) { 16166 formats = ivb_primary_formats; 16167 num_formats = ARRAY_SIZE(ivb_primary_formats); 16168 } else { 16169 formats = i965_primary_formats; 16170 num_formats = ARRAY_SIZE(i965_primary_formats); 16171 } 16172 } else { 16173 formats = i8xx_primary_formats; 16174 num_formats = ARRAY_SIZE(i8xx_primary_formats); 16175 } 16176 16177 if (INTEL_GEN(dev_priv) >= 4) 16178 plane_funcs = &i965_plane_funcs; 16179 else 16180 plane_funcs = &i8xx_plane_funcs; 16181 16182 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 16183 plane->min_cdclk = vlv_plane_min_cdclk; 16184 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 16185 plane->min_cdclk = hsw_plane_min_cdclk; 16186 else if (IS_IVYBRIDGE(dev_priv)) 16187 plane->min_cdclk = ivb_plane_min_cdclk; 16188 else 16189 plane->min_cdclk = i9xx_plane_min_cdclk; 16190 16191 plane->max_stride = i9xx_plane_max_stride; 16192 plane->update_plane = i9xx_update_plane; 16193 plane->disable_plane = i9xx_disable_plane; 16194 plane->get_hw_state = i9xx_plane_get_hw_state; 16195 plane->check_plane = i9xx_plane_check; 16196 16197 possible_crtcs = BIT(pipe); 16198 16199 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 16200 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 16201 possible_crtcs, plane_funcs, 16202 formats, num_formats, 16203 i9xx_format_modifiers, 16204 DRM_PLANE_TYPE_PRIMARY, 16205 "primary %c", pipe_name(pipe)); 16206 else 16207 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 16208 possible_crtcs, plane_funcs, 16209 formats, num_formats, 16210 i9xx_format_modifiers, 16211 DRM_PLANE_TYPE_PRIMARY, 16212 "plane %c", 16213 plane_name(plane->i9xx_plane)); 16214 if (ret) 16215 goto fail; 16216 16217 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 16218 supported_rotations = 16219 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 16220 DRM_MODE_REFLECT_X; 16221 } else if (INTEL_GEN(dev_priv) >= 4) { 16222 supported_rotations = 16223 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 16224 } else { 16225 supported_rotations = DRM_MODE_ROTATE_0; 16226 } 16227 16228 if (INTEL_GEN(dev_priv) >= 4) 16229 drm_plane_create_rotation_property(&plane->base, 16230 DRM_MODE_ROTATE_0, 16231 supported_rotations); 16232 16233 zpos = 0; 16234 drm_plane_create_zpos_immutable_property(&plane->base, zpos); 16235 16236 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); 16237 16238 return plane; 16239 16240 fail: 16241 intel_plane_free(plane); 16242 16243 return ERR_PTR(ret); 16244 } 16245 16246 static struct intel_plane * 16247 intel_cursor_plane_create(struct drm_i915_private *dev_priv, 16248 enum pipe pipe) 16249 { 16250 unsigned int possible_crtcs; 16251 struct intel_plane *cursor; 16252 int ret, zpos; 16253 16254 cursor = intel_plane_alloc(); 16255 if (IS_ERR(cursor)) 16256 return cursor; 16257 16258 cursor->pipe = pipe; 16259 cursor->i9xx_plane = (enum i9xx_plane_id) pipe; 16260 cursor->id = PLANE_CURSOR; 16261 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); 16262 16263 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 16264 cursor->max_stride = i845_cursor_max_stride; 
16265 cursor->update_plane = i845_update_cursor; 16266 cursor->disable_plane = i845_disable_cursor; 16267 cursor->get_hw_state = i845_cursor_get_hw_state; 16268 cursor->check_plane = i845_check_cursor; 16269 } else { 16270 cursor->max_stride = i9xx_cursor_max_stride; 16271 cursor->update_plane = i9xx_update_cursor; 16272 cursor->disable_plane = i9xx_disable_cursor; 16273 cursor->get_hw_state = i9xx_cursor_get_hw_state; 16274 cursor->check_plane = i9xx_check_cursor; 16275 } 16276 16277 cursor->cursor.base = ~0; 16278 cursor->cursor.cntl = ~0; 16279 16280 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 16281 cursor->cursor.size = ~0; 16282 16283 possible_crtcs = BIT(pipe); 16284 16285 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 16286 possible_crtcs, &intel_cursor_plane_funcs, 16287 intel_cursor_formats, 16288 ARRAY_SIZE(intel_cursor_formats), 16289 cursor_format_modifiers, 16290 DRM_PLANE_TYPE_CURSOR, 16291 "cursor %c", pipe_name(pipe)); 16292 if (ret) 16293 goto fail; 16294 16295 if (INTEL_GEN(dev_priv) >= 4) 16296 drm_plane_create_rotation_property(&cursor->base, 16297 DRM_MODE_ROTATE_0, 16298 DRM_MODE_ROTATE_0 | 16299 DRM_MODE_ROTATE_180); 16300 16301 zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; 16302 drm_plane_create_zpos_immutable_property(&cursor->base, zpos); 16303 16304 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 16305 16306 return cursor; 16307 16308 fail: 16309 intel_plane_free(cursor); 16310 16311 return ERR_PTR(ret); 16312 } 16313 16314 #define INTEL_CRTC_FUNCS \ 16315 .gamma_set = drm_atomic_helper_legacy_gamma_set, \ 16316 .set_config = drm_atomic_helper_set_config, \ 16317 .destroy = intel_crtc_destroy, \ 16318 .page_flip = drm_atomic_helper_page_flip, \ 16319 .atomic_duplicate_state = intel_crtc_duplicate_state, \ 16320 .atomic_destroy_state = intel_crtc_destroy_state, \ 16321 .set_crc_source = intel_crtc_set_crc_source, \ 16322 .verify_crc_source = intel_crtc_verify_crc_source, \ 16323 .get_crc_sources = intel_crtc_get_crc_sources 16324 16325 static const struct drm_crtc_funcs bdw_crtc_funcs = { 16326 INTEL_CRTC_FUNCS, 16327 16328 .get_vblank_counter = g4x_get_vblank_counter, 16329 .enable_vblank = bdw_enable_vblank, 16330 .disable_vblank = bdw_disable_vblank, 16331 }; 16332 16333 static const struct drm_crtc_funcs ilk_crtc_funcs = { 16334 INTEL_CRTC_FUNCS, 16335 16336 .get_vblank_counter = g4x_get_vblank_counter, 16337 .enable_vblank = ilk_enable_vblank, 16338 .disable_vblank = ilk_disable_vblank, 16339 }; 16340 16341 static const struct drm_crtc_funcs g4x_crtc_funcs = { 16342 INTEL_CRTC_FUNCS, 16343 16344 .get_vblank_counter = g4x_get_vblank_counter, 16345 .enable_vblank = i965_enable_vblank, 16346 .disable_vblank = i965_disable_vblank, 16347 }; 16348 16349 static const struct drm_crtc_funcs i965_crtc_funcs = { 16350 INTEL_CRTC_FUNCS, 16351 16352 .get_vblank_counter = i915_get_vblank_counter, 16353 .enable_vblank = i965_enable_vblank, 16354 .disable_vblank = i965_disable_vblank, 16355 }; 16356 16357 static const struct drm_crtc_funcs i915gm_crtc_funcs = { 16358 INTEL_CRTC_FUNCS, 16359 16360 .get_vblank_counter = i915_get_vblank_counter, 16361 .enable_vblank = i915gm_enable_vblank, 16362 .disable_vblank = i915gm_disable_vblank, 16363 }; 16364 16365 static const struct drm_crtc_funcs i915_crtc_funcs = { 16366 INTEL_CRTC_FUNCS, 16367 16368 .get_vblank_counter = i915_get_vblank_counter, 16369 .enable_vblank = i8xx_enable_vblank, 16370 .disable_vblank = i8xx_disable_vblank, 16371 }; 16372 16373 static const 
struct drm_crtc_funcs i8xx_crtc_funcs = { 16374 INTEL_CRTC_FUNCS, 16375 16376 /* no hw vblank counter */ 16377 .enable_vblank = i8xx_enable_vblank, 16378 .disable_vblank = i8xx_disable_vblank, 16379 }; 16380 16381 static struct intel_crtc *intel_crtc_alloc(void) 16382 { 16383 struct intel_crtc_state *crtc_state; 16384 struct intel_crtc *crtc; 16385 16386 crtc = kzalloc(sizeof(*crtc), GFP_KERNEL); 16387 if (!crtc) 16388 return ERR_PTR(-ENOMEM); 16389 16390 crtc_state = intel_crtc_state_alloc(crtc); 16391 if (!crtc_state) { 16392 kfree(crtc); 16393 return ERR_PTR(-ENOMEM); 16394 } 16395 16396 crtc->base.state = &crtc_state->uapi; 16397 crtc->config = crtc_state; 16398 16399 return crtc; 16400 } 16401 16402 static void intel_crtc_free(struct intel_crtc *crtc) 16403 { 16404 intel_crtc_destroy_state(&crtc->base, crtc->base.state); 16405 kfree(crtc); 16406 } 16407 16408 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) 16409 { 16410 struct intel_plane *primary, *cursor; 16411 const struct drm_crtc_funcs *funcs; 16412 struct intel_crtc *crtc; 16413 int sprite, ret; 16414 16415 crtc = intel_crtc_alloc(); 16416 if (IS_ERR(crtc)) 16417 return PTR_ERR(crtc); 16418 16419 crtc->pipe = pipe; 16420 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe]; 16421 16422 primary = intel_primary_plane_create(dev_priv, pipe); 16423 if (IS_ERR(primary)) { 16424 ret = PTR_ERR(primary); 16425 goto fail; 16426 } 16427 crtc->plane_ids_mask |= BIT(primary->id); 16428 16429 for_each_sprite(dev_priv, pipe, sprite) { 16430 struct intel_plane *plane; 16431 16432 plane = intel_sprite_plane_create(dev_priv, pipe, sprite); 16433 if (IS_ERR(plane)) { 16434 ret = PTR_ERR(plane); 16435 goto fail; 16436 } 16437 crtc->plane_ids_mask |= BIT(plane->id); 16438 } 16439 16440 cursor = intel_cursor_plane_create(dev_priv, pipe); 16441 if (IS_ERR(cursor)) { 16442 ret = PTR_ERR(cursor); 16443 goto fail; 16444 } 16445 crtc->plane_ids_mask |= BIT(cursor->id); 16446 16447 if (HAS_GMCH(dev_priv)) { 16448 if (IS_CHERRYVIEW(dev_priv) || 16449 IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv)) 16450 funcs = &g4x_crtc_funcs; 16451 else if (IS_GEN(dev_priv, 4)) 16452 funcs = &i965_crtc_funcs; 16453 else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv)) 16454 funcs = &i915gm_crtc_funcs; 16455 else if (IS_GEN(dev_priv, 3)) 16456 funcs = &i915_crtc_funcs; 16457 else 16458 funcs = &i8xx_crtc_funcs; 16459 } else { 16460 if (INTEL_GEN(dev_priv) >= 8) 16461 funcs = &bdw_crtc_funcs; 16462 else 16463 funcs = &ilk_crtc_funcs; 16464 } 16465 16466 ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base, 16467 &primary->base, &cursor->base, 16468 funcs, "pipe %c", pipe_name(pipe)); 16469 if (ret) 16470 goto fail; 16471 16472 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) || 16473 dev_priv->pipe_to_crtc_mapping[pipe] != NULL); 16474 dev_priv->pipe_to_crtc_mapping[pipe] = crtc; 16475 16476 if (INTEL_GEN(dev_priv) < 9) { 16477 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane; 16478 16479 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 16480 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL); 16481 dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc; 16482 } 16483 16484 intel_color_init(crtc); 16485 16486 WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe); 16487 16488 return 0; 16489 16490 fail: 16491 intel_crtc_free(crtc); 16492 16493 return ret; 16494 } 16495 16496 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 16497 struct drm_file *file) 16498 { 16499 struct 
drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 16500 struct drm_crtc *drmmode_crtc; 16501 struct intel_crtc *crtc; 16502 16503 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); 16504 if (!drmmode_crtc) 16505 return -ENOENT; 16506 16507 crtc = to_intel_crtc(drmmode_crtc); 16508 pipe_from_crtc_id->pipe = crtc->pipe; 16509 16510 return 0; 16511 } 16512 16513 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) 16514 { 16515 struct drm_device *dev = encoder->base.dev; 16516 struct intel_encoder *source_encoder; 16517 u32 possible_clones = 0; 16518 16519 for_each_intel_encoder(dev, source_encoder) { 16520 if (encoders_cloneable(encoder, source_encoder)) 16521 possible_clones |= drm_encoder_mask(&source_encoder->base); 16522 } 16523 16524 return possible_clones; 16525 } 16526 16527 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) 16528 { 16529 struct drm_device *dev = encoder->base.dev; 16530 struct intel_crtc *crtc; 16531 u32 possible_crtcs = 0; 16532 16533 for_each_intel_crtc(dev, crtc) { 16534 if (encoder->pipe_mask & BIT(crtc->pipe)) 16535 possible_crtcs |= drm_crtc_mask(&crtc->base); 16536 } 16537 16538 return possible_crtcs; 16539 } 16540 16541 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 16542 { 16543 if (!IS_MOBILE(dev_priv)) 16544 return false; 16545 16546 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 16547 return false; 16548 16549 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 16550 return false; 16551 16552 return true; 16553 } 16554 16555 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 16556 { 16557 if (INTEL_GEN(dev_priv) >= 9) 16558 return false; 16559 16560 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 16561 return false; 16562 16563 if (HAS_PCH_LPT_H(dev_priv) && 16564 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 16565 return false; 16566 16567 /* DDI E can't be used if DDI A requires 4 lanes */ 16568 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 16569 return false; 16570 16571 if (!dev_priv->vbt.int_crt_support) 16572 return false; 16573 16574 return true; 16575 } 16576 16577 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) 16578 { 16579 int pps_num; 16580 int pps_idx; 16581 16582 if (HAS_DDI(dev_priv)) 16583 return; 16584 /* 16585 * This w/a is needed at least on CPT/PPT, but to be sure apply it 16586 * everywhere registers can be write protected.
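* The loop below simply rewrites the unlock key into every PP_CONTROL instance so the panel power sequencer registers stay writable.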
16587 */ 16588 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 16589 pps_num = 2; 16590 else 16591 pps_num = 1; 16592 16593 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { 16594 u32 val = I915_READ(PP_CONTROL(pps_idx)); 16595 16596 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; 16597 I915_WRITE(PP_CONTROL(pps_idx), val); 16598 } 16599 } 16600 16601 static void intel_pps_init(struct drm_i915_private *dev_priv) 16602 { 16603 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv)) 16604 dev_priv->pps_mmio_base = PCH_PPS_BASE; 16605 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 16606 dev_priv->pps_mmio_base = VLV_PPS_BASE; 16607 else 16608 dev_priv->pps_mmio_base = PPS_BASE; 16609 16610 intel_pps_unlock_regs_wa(dev_priv); 16611 } 16612 16613 static void intel_setup_outputs(struct drm_i915_private *dev_priv) 16614 { 16615 struct intel_encoder *encoder; 16616 bool dpd_is_edp = false; 16617 16618 intel_pps_init(dev_priv); 16619 16620 if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) 16621 return; 16622 16623 if (INTEL_GEN(dev_priv) >= 12) { 16624 intel_ddi_init(dev_priv, PORT_A); 16625 intel_ddi_init(dev_priv, PORT_B); 16626 intel_ddi_init(dev_priv, PORT_D); 16627 intel_ddi_init(dev_priv, PORT_E); 16628 intel_ddi_init(dev_priv, PORT_F); 16629 intel_ddi_init(dev_priv, PORT_G); 16630 intel_ddi_init(dev_priv, PORT_H); 16631 intel_ddi_init(dev_priv, PORT_I); 16632 icl_dsi_init(dev_priv); 16633 } else if (IS_ELKHARTLAKE(dev_priv)) { 16634 intel_ddi_init(dev_priv, PORT_A); 16635 intel_ddi_init(dev_priv, PORT_B); 16636 intel_ddi_init(dev_priv, PORT_C); 16637 intel_ddi_init(dev_priv, PORT_D); 16638 icl_dsi_init(dev_priv); 16639 } else if (IS_GEN(dev_priv, 11)) { 16640 intel_ddi_init(dev_priv, PORT_A); 16641 intel_ddi_init(dev_priv, PORT_B); 16642 intel_ddi_init(dev_priv, PORT_C); 16643 intel_ddi_init(dev_priv, PORT_D); 16644 intel_ddi_init(dev_priv, PORT_E); 16645 /* 16646 * On some ICL SKUs port F is not present. No strap bits for 16647 * this, so rely on VBT. 16648 * Work around broken VBTs on SKUs known to have no port F. 16649 */ 16650 if (IS_ICL_WITH_PORT_F(dev_priv) && 16651 intel_bios_is_port_present(dev_priv, PORT_F)) 16652 intel_ddi_init(dev_priv, PORT_F); 16653 16654 icl_dsi_init(dev_priv); 16655 } else if (IS_GEN9_LP(dev_priv)) { 16656 /* 16657 * FIXME: Broxton doesn't support port detection via the 16658 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 16659 * detect the ports. 16660 */ 16661 intel_ddi_init(dev_priv, PORT_A); 16662 intel_ddi_init(dev_priv, PORT_B); 16663 intel_ddi_init(dev_priv, PORT_C); 16664 16665 vlv_dsi_init(dev_priv); 16666 } else if (HAS_DDI(dev_priv)) { 16667 int found; 16668 16669 if (intel_ddi_crt_present(dev_priv)) 16670 intel_crt_init(dev_priv); 16671 16672 /* 16673 * Haswell uses DDI functions to detect digital outputs. 16674 * On SKL pre-D0 the strap isn't connected, so we assume 16675 * it's there. 
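* (Hence the WaIgnoreDDIAStrap handling below, which initializes DDI A on gen9 BC regardless of the strap value.)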
16676 */ 16677 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 16678 /* WaIgnoreDDIAStrap: skl */ 16679 if (found || IS_GEN9_BC(dev_priv)) 16680 intel_ddi_init(dev_priv, PORT_A); 16681 16682 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP 16683 * register */ 16684 found = I915_READ(SFUSE_STRAP); 16685 16686 if (found & SFUSE_STRAP_DDIB_DETECTED) 16687 intel_ddi_init(dev_priv, PORT_B); 16688 if (found & SFUSE_STRAP_DDIC_DETECTED) 16689 intel_ddi_init(dev_priv, PORT_C); 16690 if (found & SFUSE_STRAP_DDID_DETECTED) 16691 intel_ddi_init(dev_priv, PORT_D); 16692 if (found & SFUSE_STRAP_DDIF_DETECTED) 16693 intel_ddi_init(dev_priv, PORT_F); 16694 /* 16695 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 16696 */ 16697 if (IS_GEN9_BC(dev_priv) && 16698 intel_bios_is_port_present(dev_priv, PORT_E)) 16699 intel_ddi_init(dev_priv, PORT_E); 16700 16701 } else if (HAS_PCH_SPLIT(dev_priv)) { 16702 int found; 16703 16704 /* 16705 * intel_edp_init_connector() depends on this completing first, 16706 * to prevent the registration of both eDP and LVDS and the 16707 * incorrect sharing of the PPS. 16708 */ 16709 intel_lvds_init(dev_priv); 16710 intel_crt_init(dev_priv); 16711 16712 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); 16713 16714 if (ilk_has_edp_a(dev_priv)) 16715 intel_dp_init(dev_priv, DP_A, PORT_A); 16716 16717 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 16718 /* PCH SDVOB multiplexed with HDMIB */ 16719 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 16720 if (!found) 16721 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 16722 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 16723 intel_dp_init(dev_priv, PCH_DP_B, PORT_B); 16724 } 16725 16726 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 16727 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 16728 16729 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 16730 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 16731 16732 if (I915_READ(PCH_DP_C) & DP_DETECTED) 16733 intel_dp_init(dev_priv, PCH_DP_C, PORT_C); 16734 16735 if (I915_READ(PCH_DP_D) & DP_DETECTED) 16736 intel_dp_init(dev_priv, PCH_DP_D, PORT_D); 16737 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 16738 bool has_edp, has_port; 16739 16740 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support) 16741 intel_crt_init(dev_priv); 16742 16743 /* 16744 * The DP_DETECTED bit is the latched state of the DDC 16745 * SDA pin at boot. However since eDP doesn't require DDC 16746 * (no way to plug in a DP->HDMI dongle) the DDC pins for 16747 * eDP ports may have been muxed to an alternate function. 16748 * Thus we can't rely on the DP_DETECTED bit alone to detect 16749 * eDP ports. Consult the VBT as well as DP_DETECTED to 16750 * detect eDP ports. 16751 * 16752 * Sadly the straps seem to be missing sometimes even for HDMI 16753 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap 16754 * and VBT for the presence of the port. Additionally we can't 16755 * trust the port type the VBT declares as we've seen at least 16756 * HDMI ports that the VBT claims are DP or eDP.
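* Hence the code below considers a port present if either the strap or the VBT says so, and only tries HDMI init when DP init did not bind the port as eDP.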
16757 */ 16758 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 16759 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 16760 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) 16761 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); 16762 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 16763 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 16764 16765 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 16766 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 16767 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) 16768 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); 16769 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 16770 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 16771 16772 if (IS_CHERRYVIEW(dev_priv)) { 16773 /* 16774 * eDP not supported on port D, 16775 * so no need to worry about it 16776 */ 16777 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 16778 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) 16779 intel_dp_init(dev_priv, CHV_DP_D, PORT_D); 16780 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) 16781 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 16782 } 16783 16784 vlv_dsi_init(dev_priv); 16785 } else if (IS_PINEVIEW(dev_priv)) { 16786 intel_lvds_init(dev_priv); 16787 intel_crt_init(dev_priv); 16788 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { 16789 bool found = false; 16790 16791 if (IS_MOBILE(dev_priv)) 16792 intel_lvds_init(dev_priv); 16793 16794 intel_crt_init(dev_priv); 16795 16796 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 16797 DRM_DEBUG_KMS("probing SDVOB\n"); 16798 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 16799 if (!found && IS_G4X(dev_priv)) { 16800 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 16801 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 16802 } 16803 16804 if (!found && IS_G4X(dev_priv)) 16805 intel_dp_init(dev_priv, DP_B, PORT_B); 16806 } 16807 16808 /* Before G4X, SDVOC doesn't have its own detect register */ 16809 16810 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 16811 DRM_DEBUG_KMS("probing SDVOC\n"); 16812 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 16813 } 16814 16815 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 16816 16817 if (IS_G4X(dev_priv)) { 16818 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 16819 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 16820 } 16821 if (IS_G4X(dev_priv)) 16822 intel_dp_init(dev_priv, DP_C, PORT_C); 16823 } 16824 16825 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) 16826 intel_dp_init(dev_priv, DP_D, PORT_D); 16827 16828 if (SUPPORTS_TV(dev_priv)) 16829 intel_tv_init(dev_priv); 16830 } else if (IS_GEN(dev_priv, 2)) { 16831 if (IS_I85X(dev_priv)) 16832 intel_lvds_init(dev_priv); 16833 16834 intel_crt_init(dev_priv); 16835 intel_dvo_init(dev_priv); 16836 } 16837 16838 intel_psr_init(dev_priv); 16839 16840 for_each_intel_encoder(&dev_priv->drm, encoder) { 16841 encoder->base.possible_crtcs = 16842 intel_encoder_possible_crtcs(encoder); 16843 encoder->base.possible_clones = 16844 intel_encoder_possible_clones(encoder); 16845 } 16846 16847 intel_init_pch_refclk(dev_priv); 16848 16849 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 16850 } 16851 16852 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 16853 { 16854 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 16855 16856 drm_framebuffer_cleanup(fb); 16857 intel_frontbuffer_put(intel_fb->frontbuffer); 16858 16859 kfree(intel_fb); 16860 } 16861 16862 static int intel_user_framebuffer_create_handle(struct
drm_framebuffer *fb, 16863 struct drm_file *file, 16864 unsigned int *handle) 16865 { 16866 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 16867 16868 if (obj->userptr.mm) { 16869 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); 16870 return -EINVAL; 16871 } 16872 16873 return drm_gem_handle_create(file, &obj->base, handle); 16874 } 16875 16876 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, 16877 struct drm_file *file, 16878 unsigned flags, unsigned color, 16879 struct drm_clip_rect *clips, 16880 unsigned num_clips) 16881 { 16882 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 16883 16884 i915_gem_object_flush_if_display(obj); 16885 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); 16886 16887 return 0; 16888 } 16889 16890 static const struct drm_framebuffer_funcs intel_fb_funcs = { 16891 .destroy = intel_user_framebuffer_destroy, 16892 .create_handle = intel_user_framebuffer_create_handle, 16893 .dirty = intel_user_framebuffer_dirty, 16894 }; 16895 16896 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, 16897 struct drm_i915_gem_object *obj, 16898 struct drm_mode_fb_cmd2 *mode_cmd) 16899 { 16900 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 16901 struct drm_framebuffer *fb = &intel_fb->base; 16902 u32 max_stride; 16903 unsigned int tiling, stride; 16904 int ret = -EINVAL; 16905 int i; 16906 16907 intel_fb->frontbuffer = intel_frontbuffer_get(obj); 16908 if (!intel_fb->frontbuffer) 16909 return -ENOMEM; 16910 16911 i915_gem_object_lock(obj); 16912 tiling = i915_gem_object_get_tiling(obj); 16913 stride = i915_gem_object_get_stride(obj); 16914 i915_gem_object_unlock(obj); 16915 16916 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 16917 /* 16918 * If there's a fence, enforce that 16919 * the fb modifier and tiling mode match. 16920 */ 16921 if (tiling != I915_TILING_NONE && 16922 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 16923 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n"); 16924 goto err; 16925 } 16926 } else { 16927 if (tiling == I915_TILING_X) { 16928 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 16929 } else if (tiling == I915_TILING_Y) { 16930 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n"); 16931 goto err; 16932 } 16933 } 16934 16935 if (!drm_any_plane_has_format(&dev_priv->drm, 16936 mode_cmd->pixel_format, 16937 mode_cmd->modifier[0])) { 16938 struct drm_format_name_buf format_name; 16939 16940 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n", 16941 drm_get_format_name(mode_cmd->pixel_format, 16942 &format_name), 16943 mode_cmd->modifier[0]); 16944 goto err; 16945 } 16946 16947 /* 16948 * gen2/3 display engine uses the fence if present, 16949 * so the tiling mode must match the fb modifier exactly. 16950 */ 16951 if (INTEL_GEN(dev_priv) < 4 && 16952 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 16953 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n"); 16954 goto err; 16955 } 16956 16957 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format, 16958 mode_cmd->modifier[0]); 16959 if (mode_cmd->pitches[0] > max_stride) { 16960 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", 16961 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 16962 "tiled" : "linear", 16963 mode_cmd->pitches[0], max_stride); 16964 goto err; 16965 } 16966 16967 /* 16968 * If there's a fence, enforce that 16969 * the fb pitch and fence stride match. 
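* (Otherwise the fenced CPU view and the scanout would presumably disagree about the buffer layout.)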
16970 */ 16971 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { 16972 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n", 16973 mode_cmd->pitches[0], stride); 16974 goto err; 16975 } 16976 16977 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 16978 if (mode_cmd->offsets[0] != 0) { 16979 DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n", 16980 mode_cmd->offsets[0]); 16981 goto err; 16982 } 16983 16984 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); 16985 16986 for (i = 0; i < fb->format->num_planes; i++) { 16987 u32 stride_alignment; 16988 16989 if (mode_cmd->handles[i] != mode_cmd->handles[0]) { 16990 DRM_DEBUG_KMS("bad plane %d handle\n", i); 16991 goto err; 16992 } 16993 16994 stride_alignment = intel_fb_stride_alignment(fb, i); 16995 if (fb->pitches[i] & (stride_alignment - 1)) { 16996 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n", 16997 i, fb->pitches[i], stride_alignment); 16998 goto err; 16999 } 17000 17001 if (is_gen12_ccs_plane(fb, i)) { 17002 int ccs_aux_stride = gen12_ccs_aux_stride(fb, i); 17003 17004 if (fb->pitches[i] != ccs_aux_stride) { 17005 DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n", 17006 i, 17007 fb->pitches[i], ccs_aux_stride); 17008 goto err; 17009 } 17010 } 17011 17012 fb->obj[i] = &obj->base; 17013 } 17014 17015 ret = intel_fill_fb_info(dev_priv, fb); 17016 if (ret) 17017 goto err; 17018 17019 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); 17020 if (ret) { 17021 DRM_ERROR("framebuffer init failed %d\n", ret); 17022 goto err; 17023 } 17024 17025 return 0; 17026 17027 err: 17028 intel_frontbuffer_put(intel_fb->frontbuffer); 17029 return ret; 17030 } 17031 17032 static struct drm_framebuffer * 17033 intel_user_framebuffer_create(struct drm_device *dev, 17034 struct drm_file *filp, 17035 const struct drm_mode_fb_cmd2 *user_mode_cmd) 17036 { 17037 struct drm_framebuffer *fb; 17038 struct drm_i915_gem_object *obj; 17039 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 17040 17041 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); 17042 if (!obj) 17043 return ERR_PTR(-ENOENT); 17044 17045 fb = intel_framebuffer_create(obj, &mode_cmd); 17046 i915_gem_object_put(obj); 17047 17048 return fb; 17049 } 17050 17051 static void intel_atomic_state_free(struct drm_atomic_state *state) 17052 { 17053 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 17054 17055 drm_atomic_state_default_release(state); 17056 17057 i915_sw_fence_fini(&intel_state->commit_ready); 17058 17059 kfree(state); 17060 } 17061 17062 static enum drm_mode_status 17063 intel_mode_valid(struct drm_device *dev, 17064 const struct drm_display_mode *mode) 17065 { 17066 struct drm_i915_private *dev_priv = to_i915(dev); 17067 int hdisplay_max, htotal_max; 17068 int vdisplay_max, vtotal_max; 17069 17070 /* 17071 * Can't reject DBLSCAN here because Xorg ddxen can add piles 17072 * of DBLSCAN modes to the output's mode list when they detect 17073 * the scaling mode property on the connector. And they don't 17074 * ask the kernel to validate those modes in any way until 17075 * modeset time at which point the client gets a protocol error. 17076 * So in order to not upset those clients we silently ignore the 17077 * DBLSCAN flag on such connectors. For other connectors we will 17078 * reject modes with the DBLSCAN flag in encoder->compute_config(). 17079 * And we always reject DBLSCAN modes in connector->mode_valid() 17080 * as we never want such modes on the connector's mode list. 
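* That is also why there is no DBLSCAN check anywhere in this function.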
17081 */ 17082 17083 if (mode->vscan > 1) 17084 return MODE_NO_VSCAN; 17085 17086 if (mode->flags & DRM_MODE_FLAG_HSKEW) 17087 return MODE_H_ILLEGAL; 17088 17089 if (mode->flags & (DRM_MODE_FLAG_CSYNC | 17090 DRM_MODE_FLAG_NCSYNC | 17091 DRM_MODE_FLAG_PCSYNC)) 17092 return MODE_HSYNC; 17093 17094 if (mode->flags & (DRM_MODE_FLAG_BCAST | 17095 DRM_MODE_FLAG_PIXMUX | 17096 DRM_MODE_FLAG_CLKDIV2)) 17097 return MODE_BAD; 17098 17099 /* Transcoder timing limits */ 17100 if (INTEL_GEN(dev_priv) >= 11) { 17101 hdisplay_max = 16384; 17102 vdisplay_max = 8192; 17103 htotal_max = 16384; 17104 vtotal_max = 8192; 17105 } else if (INTEL_GEN(dev_priv) >= 9 || 17106 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 17107 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ 17108 vdisplay_max = 4096; 17109 htotal_max = 8192; 17110 vtotal_max = 8192; 17111 } else if (INTEL_GEN(dev_priv) >= 3) { 17112 hdisplay_max = 4096; 17113 vdisplay_max = 4096; 17114 htotal_max = 8192; 17115 vtotal_max = 8192; 17116 } else { 17117 hdisplay_max = 2048; 17118 vdisplay_max = 2048; 17119 htotal_max = 4096; 17120 vtotal_max = 4096; 17121 } 17122 17123 if (mode->hdisplay > hdisplay_max || 17124 mode->hsync_start > htotal_max || 17125 mode->hsync_end > htotal_max || 17126 mode->htotal > htotal_max) 17127 return MODE_H_ILLEGAL; 17128 17129 if (mode->vdisplay > vdisplay_max || 17130 mode->vsync_start > vtotal_max || 17131 mode->vsync_end > vtotal_max || 17132 mode->vtotal > vtotal_max) 17133 return MODE_V_ILLEGAL; 17134 17135 if (INTEL_GEN(dev_priv) >= 5) { 17136 if (mode->hdisplay < 64 || 17137 mode->htotal - mode->hdisplay < 32) 17138 return MODE_H_ILLEGAL; 17139 17140 if (mode->vtotal - mode->vdisplay < 5) 17141 return MODE_V_ILLEGAL; 17142 } else { 17143 if (mode->htotal - mode->hdisplay < 32) 17144 return MODE_H_ILLEGAL; 17145 17146 if (mode->vtotal - mode->vdisplay < 3) 17147 return MODE_V_ILLEGAL; 17148 } 17149 17150 return MODE_OK; 17151 } 17152 17153 enum drm_mode_status 17154 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, 17155 const struct drm_display_mode *mode) 17156 { 17157 int plane_width_max, plane_height_max; 17158 17159 /* 17160 * intel_mode_valid() should be 17161 * sufficient on older platforms. 17162 */ 17163 if (INTEL_GEN(dev_priv) < 9) 17164 return MODE_OK; 17165 17166 /* 17167 * Most people will probably want a fullscreen 17168 * plane so let's not advertise modes that are 17169 * too big for that.
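* The numbers below are therefore maximum plane dimensions, which are smaller than the raw transcoder timing limits checked in intel_mode_valid().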
17170 */ 17171 if (INTEL_GEN(dev_priv) >= 11) { 17172 plane_width_max = 5120; 17173 plane_height_max = 4320; 17174 } else { 17175 plane_width_max = 5120; 17176 plane_height_max = 4096; 17177 } 17178 17179 if (mode->hdisplay > plane_width_max) 17180 return MODE_H_ILLEGAL; 17181 17182 if (mode->vdisplay > plane_height_max) 17183 return MODE_V_ILLEGAL; 17184 17185 return MODE_OK; 17186 } 17187 17188 static const struct drm_mode_config_funcs intel_mode_funcs = { 17189 .fb_create = intel_user_framebuffer_create, 17190 .get_format_info = intel_get_format_info, 17191 .output_poll_changed = intel_fbdev_output_poll_changed, 17192 .mode_valid = intel_mode_valid, 17193 .atomic_check = intel_atomic_check, 17194 .atomic_commit = intel_atomic_commit, 17195 .atomic_state_alloc = intel_atomic_state_alloc, 17196 .atomic_state_clear = intel_atomic_state_clear, 17197 .atomic_state_free = intel_atomic_state_free, 17198 }; 17199 17200 /** 17201 * intel_init_display_hooks - initialize the display modesetting hooks 17202 * @dev_priv: device private 17203 */ 17204 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 17205 { 17206 intel_init_cdclk_hooks(dev_priv); 17207 17208 if (INTEL_GEN(dev_priv) >= 9) { 17209 dev_priv->display.get_pipe_config = hsw_get_pipe_config; 17210 dev_priv->display.get_initial_plane_config = 17211 skl_get_initial_plane_config; 17212 dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock; 17213 dev_priv->display.crtc_enable = hsw_crtc_enable; 17214 dev_priv->display.crtc_disable = hsw_crtc_disable; 17215 } else if (HAS_DDI(dev_priv)) { 17216 dev_priv->display.get_pipe_config = hsw_get_pipe_config; 17217 dev_priv->display.get_initial_plane_config = 17218 i9xx_get_initial_plane_config; 17219 dev_priv->display.crtc_compute_clock = 17220 hsw_crtc_compute_clock; 17221 dev_priv->display.crtc_enable = hsw_crtc_enable; 17222 dev_priv->display.crtc_disable = hsw_crtc_disable; 17223 } else if (HAS_PCH_SPLIT(dev_priv)) { 17224 dev_priv->display.get_pipe_config = ilk_get_pipe_config; 17225 dev_priv->display.get_initial_plane_config = 17226 i9xx_get_initial_plane_config; 17227 dev_priv->display.crtc_compute_clock = 17228 ilk_crtc_compute_clock; 17229 dev_priv->display.crtc_enable = ilk_crtc_enable; 17230 dev_priv->display.crtc_disable = ilk_crtc_disable; 17231 } else if (IS_CHERRYVIEW(dev_priv)) { 17232 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 17233 dev_priv->display.get_initial_plane_config = 17234 i9xx_get_initial_plane_config; 17235 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; 17236 dev_priv->display.crtc_enable = valleyview_crtc_enable; 17237 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17238 } else if (IS_VALLEYVIEW(dev_priv)) { 17239 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 17240 dev_priv->display.get_initial_plane_config = 17241 i9xx_get_initial_plane_config; 17242 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; 17243 dev_priv->display.crtc_enable = valleyview_crtc_enable; 17244 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17245 } else if (IS_G4X(dev_priv)) { 17246 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 17247 dev_priv->display.get_initial_plane_config = 17248 i9xx_get_initial_plane_config; 17249 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; 17250 dev_priv->display.crtc_enable = i9xx_crtc_enable; 17251 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17252 } else if (IS_PINEVIEW(dev_priv)) { 17253 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 
17254 dev_priv->display.get_initial_plane_config = 17255 i9xx_get_initial_plane_config; 17256 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 17257 dev_priv->display.crtc_enable = i9xx_crtc_enable; 17258 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17259 } else if (!IS_GEN(dev_priv, 2)) { 17260 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 17261 dev_priv->display.get_initial_plane_config = 17262 i9xx_get_initial_plane_config; 17263 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 17264 dev_priv->display.crtc_enable = i9xx_crtc_enable; 17265 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17266 } else { 17267 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 17268 dev_priv->display.get_initial_plane_config = 17269 i9xx_get_initial_plane_config; 17270 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; 17271 dev_priv->display.crtc_enable = i9xx_crtc_enable; 17272 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17273 } 17274 17275 if (IS_GEN(dev_priv, 5)) { 17276 dev_priv->display.fdi_link_train = ilk_fdi_link_train; 17277 } else if (IS_GEN(dev_priv, 6)) { 17278 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 17279 } else if (IS_IVYBRIDGE(dev_priv)) { 17280 /* FIXME: detect B0+ stepping and use auto training */ 17281 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 17282 } 17283 17284 if (INTEL_GEN(dev_priv) >= 9) 17285 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables; 17286 else 17287 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables; 17288 17289 } 17290 17291 void intel_modeset_init_hw(struct drm_i915_private *i915) 17292 { 17293 intel_update_cdclk(i915); 17294 intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK"); 17295 i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw; 17296 } 17297 17298 /* 17299 * Calculate what we think the watermarks should be for the state we've read 17300 * out of the hardware and then immediately program those watermarks so that 17301 * we ensure the hardware settings match our internal state. 17302 * 17303 * We can calculate what we think WMs should be by creating a duplicate of the 17304 * current state (which was constructed during hardware readout) and running it 17305 * through the atomic check code to calculate new watermark values in the 17306 * state object. 17307 */ 17308 static void sanitize_watermarks(struct drm_device *dev) 17309 { 17310 struct drm_i915_private *dev_priv = to_i915(dev); 17311 struct drm_atomic_state *state; 17312 struct intel_atomic_state *intel_state; 17313 struct intel_crtc *crtc; 17314 struct intel_crtc_state *crtc_state; 17315 struct drm_modeset_acquire_ctx ctx; 17316 int ret; 17317 int i; 17318 17319 /* Only supported on platforms that use atomic watermark design */ 17320 if (!dev_priv->display.optimize_watermarks) 17321 return; 17322 17323 /* 17324 * We need to hold connection_mutex before calling duplicate_state so 17325 * that the connector loop is protected.
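* drm_modeset_lock_all_ctx() below takes connection_mutex along with all the other modeset locks, retrying on deadlock.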
17326 */ 17327 drm_modeset_acquire_init(&ctx, 0); 17328 retry: 17329 ret = drm_modeset_lock_all_ctx(dev, &ctx); 17330 if (ret == -EDEADLK) { 17331 drm_modeset_backoff(&ctx); 17332 goto retry; 17333 } else if (WARN_ON(ret)) { 17334 goto fail; 17335 } 17336 17337 state = drm_atomic_helper_duplicate_state(dev, &ctx); 17338 if (WARN_ON(IS_ERR(state))) 17339 goto fail; 17340 17341 intel_state = to_intel_atomic_state(state); 17342 17343 /* 17344 * Hardware readout is the only time we don't want to calculate 17345 * intermediate watermarks (since we don't trust the current 17346 * watermarks). 17347 */ 17348 if (!HAS_GMCH(dev_priv)) 17349 intel_state->skip_intermediate_wm = true; 17350 17351 ret = intel_atomic_check(dev, state); 17352 if (ret) { 17353 /* 17354 * If we fail here, it means that the hardware appears to be 17355 * programmed in a way that shouldn't be possible, given our 17356 * understanding of watermark requirements. This might mean a 17357 * mistake in the hardware readout code or a mistake in the 17358 * watermark calculations for a given platform. Raise a WARN 17359 * so that this is noticeable. 17360 * 17361 * If this actually happens, we'll have to just leave the 17362 * BIOS-programmed watermarks untouched and hope for the best. 17363 */ 17364 WARN(true, "Could not determine valid watermarks for inherited state\n"); 17365 goto put_state; 17366 } 17367 17368 /* Write calculated watermark values back */ 17369 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { 17370 crtc_state->wm.need_postvbl_update = true; 17371 dev_priv->display.optimize_watermarks(intel_state, crtc); 17372 17373 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; 17374 } 17375 17376 put_state: 17377 drm_atomic_state_put(state); 17378 fail: 17379 drm_modeset_drop_locks(&ctx); 17380 drm_modeset_acquire_fini(&ctx); 17381 } 17382 17383 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) 17384 { 17385 if (IS_GEN(dev_priv, 5)) { 17386 u32 fdi_pll_clk = 17387 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; 17388 17389 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; 17390 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) { 17391 dev_priv->fdi_pll_freq = 270000; 17392 } else { 17393 return; 17394 } 17395 17396 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); 17397 } 17398 17399 static int intel_initial_commit(struct drm_device *dev) 17400 { 17401 struct drm_atomic_state *state = NULL; 17402 struct drm_modeset_acquire_ctx ctx; 17403 struct intel_crtc *crtc; 17404 int ret = 0; 17405 17406 state = drm_atomic_state_alloc(dev); 17407 if (!state) 17408 return -ENOMEM; 17409 17410 drm_modeset_acquire_init(&ctx, 0); 17411 17412 retry: 17413 state->acquire_ctx = &ctx; 17414 17415 for_each_intel_crtc(dev, crtc) { 17416 struct intel_crtc_state *crtc_state = 17417 intel_atomic_get_crtc_state(state, crtc); 17418 17419 if (IS_ERR(crtc_state)) { 17420 ret = PTR_ERR(crtc_state); 17421 goto out; 17422 } 17423 17424 if (crtc_state->hw.active) { 17425 ret = drm_atomic_add_affected_planes(state, &crtc->base); 17426 if (ret) 17427 goto out; 17428 17429 /* 17430 * FIXME hack to force a LUT update to avoid the 17431 * plane update forcing the pipe gamma on without 17432 * having a proper LUT loaded. Remove once we 17433 * have readout for pipe gamma enable. 17434 */ 17435 crtc_state->uapi.color_mgmt_changed = true; 17436 17437 /* 17438 * FIXME hack to force full modeset when DSC is being 17439 * used. 
17440 * 17441 * As long as we do not have full state readout and 17442 * config comparison of crtc_state->dsc, we have no way 17443 * to ensure reliable fastset. Remove once we have 17444 * readout for DSC. 17445 */ 17446 if (crtc_state->dsc.compression_enable) { 17447 ret = drm_atomic_add_affected_connectors(state, 17448 &crtc->base); 17449 if (ret) 17450 goto out; 17451 crtc_state->uapi.mode_changed = true; 17452 drm_dbg_kms(dev, "Force full modeset for DSC\n"); 17453 } 17454 } 17455 } 17456 17457 ret = drm_atomic_commit(state); 17458 17459 out: 17460 if (ret == -EDEADLK) { 17461 drm_atomic_state_clear(state); 17462 drm_modeset_backoff(&ctx); 17463 goto retry; 17464 } 17465 17466 drm_atomic_state_put(state); 17467 17468 drm_modeset_drop_locks(&ctx); 17469 drm_modeset_acquire_fini(&ctx); 17470 17471 return ret; 17472 } 17473 17474 static void intel_mode_config_init(struct drm_i915_private *i915) 17475 { 17476 struct drm_mode_config *mode_config = &i915->drm.mode_config; 17477 17478 drm_mode_config_init(&i915->drm); 17479 17480 mode_config->min_width = 0; 17481 mode_config->min_height = 0; 17482 17483 mode_config->preferred_depth = 24; 17484 mode_config->prefer_shadow = 1; 17485 17486 mode_config->allow_fb_modifiers = true; 17487 17488 mode_config->funcs = &intel_mode_funcs; 17489 17490 /* 17491 * Maximum framebuffer dimensions, chosen to match 17492 * the maximum render engine surface size on gen4+. 17493 */ 17494 if (INTEL_GEN(i915) >= 7) { 17495 mode_config->max_width = 16384; 17496 mode_config->max_height = 16384; 17497 } else if (INTEL_GEN(i915) >= 4) { 17498 mode_config->max_width = 8192; 17499 mode_config->max_height = 8192; 17500 } else if (IS_GEN(i915, 3)) { 17501 mode_config->max_width = 4096; 17502 mode_config->max_height = 4096; 17503 } else { 17504 mode_config->max_width = 2048; 17505 mode_config->max_height = 2048; 17506 } 17507 17508 if (IS_I845G(i915) || IS_I865G(i915)) { 17509 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512; 17510 mode_config->cursor_height = 1023; 17511 } else if (IS_GEN(i915, 2)) { 17512 mode_config->cursor_width = 64; 17513 mode_config->cursor_height = 64; 17514 } else { 17515 mode_config->cursor_width = 256; 17516 mode_config->cursor_height = 256; 17517 } 17518 } 17519 17520 int intel_modeset_init(struct drm_i915_private *i915) 17521 { 17522 struct drm_device *dev = &i915->drm; 17523 enum pipe pipe; 17524 struct intel_crtc *crtc; 17525 int ret; 17526 17527 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); 17528 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | 17529 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); 17530 17531 intel_mode_config_init(i915); 17532 17533 ret = intel_bw_init(i915); 17534 if (ret) 17535 return ret; 17536 17537 init_llist_head(&i915->atomic_helper.free_list); 17538 INIT_WORK(&i915->atomic_helper.free_work, 17539 intel_atomic_helper_free_state_worker); 17540 17541 intel_init_quirks(i915); 17542 17543 intel_fbc_init(i915); 17544 17545 intel_init_pm(i915); 17546 17547 intel_panel_sanitize_ssc(i915); 17548 17549 intel_gmbus_setup(i915); 17550 17551 DRM_DEBUG_KMS("%d display pipe%s available.\n", 17552 INTEL_NUM_PIPES(i915), 17553 INTEL_NUM_PIPES(i915) > 1 ? 
"s" : ""); 17554 17555 if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { 17556 for_each_pipe(i915, pipe) { 17557 ret = intel_crtc_init(i915, pipe); 17558 if (ret) { 17559 drm_mode_config_cleanup(dev); 17560 return ret; 17561 } 17562 } 17563 } 17564 17565 intel_shared_dpll_init(dev); 17566 intel_update_fdi_pll_freq(i915); 17567 17568 intel_update_czclk(i915); 17569 intel_modeset_init_hw(i915); 17570 17571 intel_hdcp_component_init(i915); 17572 17573 if (i915->max_cdclk_freq == 0) 17574 intel_update_max_cdclk(i915); 17575 17576 /* Just disable it once at startup */ 17577 intel_vga_disable(i915); 17578 intel_setup_outputs(i915); 17579 17580 drm_modeset_lock_all(dev); 17581 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 17582 drm_modeset_unlock_all(dev); 17583 17584 for_each_intel_crtc(dev, crtc) { 17585 struct intel_initial_plane_config plane_config = {}; 17586 17587 if (!crtc->active) 17588 continue; 17589 17590 /* 17591 * Note that reserving the BIOS fb up front prevents us 17592 * from stuffing other stolen allocations like the ring 17593 * on top. This prevents some ugliness at boot time, and 17594 * can even allow for smooth boot transitions if the BIOS 17595 * fb is large enough for the active pipe configuration. 17596 */ 17597 i915->display.get_initial_plane_config(crtc, &plane_config); 17598 17599 /* 17600 * If the fb is shared between multiple heads, we'll 17601 * just get the first one. 17602 */ 17603 intel_find_initial_plane_obj(crtc, &plane_config); 17604 } 17605 17606 /* 17607 * Make sure hardware watermarks really match the state we read out. 17608 * Note that we need to do this after reconstructing the BIOS fb's 17609 * since the watermark calculation done here will use pstate->fb. 17610 */ 17611 if (!HAS_GMCH(i915)) 17612 sanitize_watermarks(dev); 17613 17614 /* 17615 * Force all active planes to recompute their states. So that on 17616 * mode_setcrtc after probe, all the intel_plane_state variables 17617 * are already calculated and there is no assert_plane warnings 17618 * during bootup. 
17619 */ 17620 ret = intel_initial_commit(dev); 17621 if (ret) 17622 DRM_DEBUG_KMS("Initial commit in probe failed.\n"); 17623 17624 return 0; 17625 } 17626 17627 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 17628 { 17629 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17630 /* 640x480@60Hz, ~25175 kHz */ 17631 struct dpll clock = { 17632 .m1 = 18, 17633 .m2 = 7, 17634 .p1 = 13, 17635 .p2 = 4, 17636 .n = 2, 17637 }; 17638 u32 dpll, fp; 17639 int i; 17640 17641 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154); 17642 17643 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 17644 pipe_name(pipe), clock.vco, clock.dot); 17645 17646 fp = i9xx_dpll_compute_fp(&clock); 17647 dpll = DPLL_DVO_2X_MODE | 17648 DPLL_VGA_MODE_DIS | 17649 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 17650 PLL_P2_DIVIDE_BY_4 | 17651 PLL_REF_INPUT_DREFCLK | 17652 DPLL_VCO_ENABLE; 17653 17654 I915_WRITE(FP0(pipe), fp); 17655 I915_WRITE(FP1(pipe), fp); 17656 17657 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); 17658 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); 17659 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); 17660 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); 17661 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); 17662 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); 17663 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); 17664 17665 /* 17666 * Apparently we need to have VGA mode enabled prior to changing 17667 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 17668 * dividers, even though the register value does change. 17669 */ 17670 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); 17671 I915_WRITE(DPLL(pipe), dpll); 17672 17673 /* Wait for the clocks to stabilize. */ 17674 POSTING_READ(DPLL(pipe)); 17675 udelay(150); 17676 17677 /* The pixel multiplier can only be updated once the 17678 * DPLL is enabled and the clocks are stable. 17679 * 17680 * So write it again. 
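* (Presumably the multiplier bits are ignored while the PLL is still locking, so the value only sticks once rewritten here.)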
17681 */ 17682 I915_WRITE(DPLL(pipe), dpll); 17683 17684 /* We do this three times for luck */ 17685 for (i = 0; i < 3; i++) { 17686 I915_WRITE(DPLL(pipe), dpll); 17687 POSTING_READ(DPLL(pipe)); 17688 udelay(150); /* wait for warmup */ 17689 } 17690 17691 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE); 17692 POSTING_READ(PIPECONF(pipe)); 17693 17694 intel_wait_for_pipe_scanline_moving(crtc); 17695 } 17696 17697 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 17698 { 17699 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17700 17701 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", 17702 pipe_name(pipe)); 17703 17704 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE); 17705 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE); 17706 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE); 17707 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE); 17708 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE); 17709 17710 I915_WRITE(PIPECONF(pipe), 0); 17711 POSTING_READ(PIPECONF(pipe)); 17712 17713 intel_wait_for_pipe_scanline_stopped(crtc); 17714 17715 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); 17716 POSTING_READ(DPLL(pipe)); 17717 } 17718 17719 static void 17720 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) 17721 { 17722 struct intel_crtc *crtc; 17723 17724 if (INTEL_GEN(dev_priv) >= 4) 17725 return; 17726 17727 for_each_intel_crtc(&dev_priv->drm, crtc) { 17728 struct intel_plane *plane = 17729 to_intel_plane(crtc->base.primary); 17730 struct intel_crtc *plane_crtc; 17731 enum pipe pipe; 17732 17733 if (!plane->get_hw_state(plane, &pipe)) 17734 continue; 17735 17736 if (pipe == crtc->pipe) 17737 continue; 17738 17739 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", 17740 plane->base.base.id, plane->base.name); 17741 17742 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17743 intel_plane_disable_noatomic(plane_crtc, plane); 17744 } 17745 } 17746 17747 static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 17748 { 17749 struct drm_device *dev = crtc->base.dev; 17750 struct intel_encoder *encoder; 17751 17752 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 17753 return true; 17754 17755 return false; 17756 } 17757 17758 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) 17759 { 17760 struct drm_device *dev = encoder->base.dev; 17761 struct intel_connector *connector; 17762 17763 for_each_connector_on_encoder(dev, &encoder->base, connector) 17764 return connector; 17765 17766 return NULL; 17767 } 17768 17769 static bool has_pch_transcoder(struct drm_i915_private *dev_priv, 17770 enum pipe pch_transcoder) 17771 { 17772 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 17773 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A); 17774 } 17775 17776 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state) 17777 { 17778 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 17779 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 17780 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 17781 17782 if (INTEL_GEN(dev_priv) >= 9 || 17783 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 17784 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); 17785 u32 val; 17786 17787 if (transcoder_is_dsi(cpu_transcoder)) 17788 return; 17789 17790 val = I915_READ(reg); 17791 val &= ~HSW_FRAME_START_DELAY_MASK; 17792 val |= HSW_FRAME_START_DELAY(0); 17793 I915_WRITE(reg,
val); 17794 } else { 17795 i915_reg_t reg = PIPECONF(cpu_transcoder); 17796 u32 val; 17797 17798 val = I915_READ(reg); 17799 val &= ~PIPECONF_FRAME_START_DELAY_MASK; 17800 val |= PIPECONF_FRAME_START_DELAY(0); 17801 I915_WRITE(reg, val); 17802 } 17803 17804 if (!crtc_state->has_pch_encoder) 17805 return; 17806 17807 if (HAS_PCH_IBX(dev_priv)) { 17808 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe); 17809 u32 val; 17810 17811 val = I915_READ(reg); 17812 val &= ~TRANS_FRAME_START_DELAY_MASK; 17813 val |= TRANS_FRAME_START_DELAY(0); 17814 I915_WRITE(reg, val); 17815 } else { 17816 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc); 17817 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder); 17818 u32 val; 17819 17820 val = I915_READ(reg); 17821 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 17822 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0); 17823 I915_WRITE(reg, val); 17824 } 17825 } 17826 17827 static void intel_sanitize_crtc(struct intel_crtc *crtc, 17828 struct drm_modeset_acquire_ctx *ctx) 17829 { 17830 struct drm_device *dev = crtc->base.dev; 17831 struct drm_i915_private *dev_priv = to_i915(dev); 17832 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); 17833 17834 if (crtc_state->hw.active) { 17835 struct intel_plane *plane; 17836 17837 /* Clear any frame start delays used for debugging left by the BIOS */ 17838 intel_sanitize_frame_start_delay(crtc_state); 17839 17840 /* Disable everything but the primary plane */ 17841 for_each_intel_plane_on_crtc(dev, crtc, plane) { 17842 const struct intel_plane_state *plane_state = 17843 to_intel_plane_state(plane->base.state); 17844 17845 if (plane_state->uapi.visible && 17846 plane->base.type != DRM_PLANE_TYPE_PRIMARY) 17847 intel_plane_disable_noatomic(crtc, plane); 17848 } 17849 17850 /* 17851 * Disable any background color set by the BIOS, but enable the 17852 * gamma and CSC to match how we program our planes. 17853 */ 17854 if (INTEL_GEN(dev_priv) >= 9) 17855 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe), 17856 SKL_BOTTOM_COLOR_GAMMA_ENABLE | 17857 SKL_BOTTOM_COLOR_CSC_ENABLE); 17858 } 17859 17860 /* Adjust the state of the output pipe according to whether we 17861 * have active connectors/encoders. */ 17862 if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc)) 17863 intel_crtc_disable_noatomic(crtc, ctx); 17864 17865 if (crtc_state->hw.active || HAS_GMCH(dev_priv)) { 17866 /* 17867 * We start out with underrun reporting disabled to avoid races. 17868 * For correct bookkeeping mark this on active crtcs. 17869 * 17870 * Also on gmch platforms we don't have any hardware bits to 17871 * disable the underrun reporting. Which means we need to start 17872 * out with underrun reporting disabled also on inactive pipes, 17873 * since otherwise we'll complain about the garbage we read when 17874 * e.g. coming up after runtime pm. 17875 * 17876 * No protection against concurrent access is required - at 17877 * worst a fifo underrun happens which also sets this to false. 17878 */ 17879 crtc->cpu_fifo_underrun_disabled = true; 17880 /* 17881 * We track the PCH transcoder underrun reporting state 17882 * within the crtc, with the crtc for pipe A housing the underrun 17883 * reporting state for PCH transcoder A, the crtc for pipe B housing 17884 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, 17885 * and marking underrun reporting as disabled for the non-existing 17886 * PCH transcoders B and C would prevent enabling the south 17887 * error interrupt (see cpt_can_enable_serr_int()).
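* Hence the has_pch_transcoder() check below before marking the PCH FIFO underrun reporting as disabled.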
17888 */ 17889 if (has_pch_transcoder(dev_priv, crtc->pipe)) 17890 crtc->pch_fifo_underrun_disabled = true; 17891 } 17892 } 17893 17894 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) 17895 { 17896 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 17897 17898 /* 17899 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram 17900 * the hardware when a high res display is plugged in. DPLL P 17901 * divider is zero, and the pipe timings are bonkers. We'll 17902 * try to disable everything in that case. 17903 * 17904 * FIXME would be nice to be able to sanitize this state 17905 * without several WARNs, but for now let's take the easy 17906 * road. 17907 */ 17908 return IS_GEN(dev_priv, 6) && 17909 crtc_state->hw.active && 17910 crtc_state->shared_dpll && 17911 crtc_state->port_clock == 0; 17912 } 17913 17914 static void intel_sanitize_encoder(struct intel_encoder *encoder) 17915 { 17916 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 17917 struct intel_connector *connector; 17918 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 17919 struct intel_crtc_state *crtc_state = crtc ? 17920 to_intel_crtc_state(crtc->base.state) : NULL; 17921 17922 /* We need to check both for a crtc link (meaning that the 17923 * encoder is active and trying to read from a pipe) and the 17924 * pipe itself being active. */ 17925 bool has_active_crtc = crtc_state && 17926 crtc_state->hw.active; 17927 17928 if (crtc_state && has_bogus_dpll_config(crtc_state)) { 17929 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n", 17930 pipe_name(crtc->pipe)); 17931 has_active_crtc = false; 17932 } 17933 17934 connector = intel_encoder_find_connector(encoder); 17935 if (connector && !has_active_crtc) { 17936 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", 17937 encoder->base.base.id, 17938 encoder->base.name); 17939 17940 /* Connector is active, but has no active pipe. This is 17941 * fallout from our resume register restoring. Disable 17942 * the encoder manually again. */ 17943 if (crtc_state) { 17944 struct drm_encoder *best_encoder; 17945 17946 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 17947 encoder->base.base.id, 17948 encoder->base.name); 17949 17950 /* avoid oopsing in case the hooks consult best_encoder */ 17951 best_encoder = connector->base.state->best_encoder; 17952 connector->base.state->best_encoder = &encoder->base; 17953 17954 if (encoder->disable) 17955 encoder->disable(encoder, crtc_state, 17956 connector->base.state); 17957 if (encoder->post_disable) 17958 encoder->post_disable(encoder, crtc_state, 17959 connector->base.state); 17960 17961 connector->base.state->best_encoder = best_encoder; 17962 } 17963 encoder->base.crtc = NULL; 17964 17965 /* Inconsistent output/port/pipe state happens presumably due to 17966 * a bug in one of the get_hw_state functions. Or someplace else 17967 * in our code, like the register restore mess on resume. Clamp 17968 * things to off as a safer default.
		 */
		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			      plane->base.base.id, plane->base.name,
			      enableddisabled(visible), pipe_name(pipe));
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}

static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			dev_priv->active_pipes |= BIT(crtc->pipe);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->hw.active));
	}

	readout_plane_state(dev_priv);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							 &pll->state.hw_state);

		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
		    pll->info->id == DPLL_ID_EHL_DPLL4) {
			pll->wakeref = intel_display_power_get(dev_priv,
							       POWER_DOMAIN_DPLL_DC_OFF);
		}

		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->hw.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

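	/*
	 * Now read back the encoders: each active encoder reports the pipe
	 * it is driving and fills in its part of that pipe's state via
	 * ->get_config().
	 */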
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
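			 * (That flag is I915_MODE_FLAG_INHERITED, set just
			 * below.)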
			 */
			mode->private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
				      plane->base.base.id, plane->base.name,
				      crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (WARN_ON(min_cdclk < 0))
				min_cdclk = 0;
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}

static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_crtc_state *crtc_state;

		if (!encoder->get_power_domains)
			continue;

		/*
		 * MST-primary and inactive encoders don't have a crtc state
		 * and neither of these requires any power domain references.
		 */
		if (!encoder->base.crtc)
			continue;

		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
		encoder->get_power_domains(encoder, crtc_state);
	}
}

static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}

static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
				       enum port port, i915_reg_t hdmi_reg)
{
	u32 val = I915_READ(hdmi_reg);

	if (val & SDVO_ENABLE ||
	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
		      port_name(port));

	val &= ~SDVO_PIPE_SEL_MASK;
	val |= SDVO_PIPE_SEL(PIPE_A);

	I915_WRITE(hdmi_reg, val);
}

static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
				     enum port port, i915_reg_t dp_reg)
{
	u32 val = I915_READ(dp_reg);

	if (val & DP_PORT_EN ||
	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
		      port_name(port));

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(PIPE_A);

	I915_WRITE(dp_reg, val);
}

static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even when it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplexes with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}

/*
 * Scan out the current hw modeset state and sanitize it,
 * clamping any inconsistencies to an off state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
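	 * The loop below therefore resets vblank bookkeeping on each pipe
	 * and re-enables vblanks on the pipes found active.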
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}

void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/*
	 * Kill all the work that may have been queued by hpd.
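	 * That covers the connector modeset retry work and, where an HDCP
	 * shim is present, the HDCP check and property works.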
	 */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Shut down interrupts and polling first to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup().
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	intel_bw_cleanup(i915);

	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

struct intel_display_error_state {

	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};

struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;
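
	/*
	 * Error capture may run in atomic context (hence GFP_ATOMIC above),
	 * so only read registers whose power domains are confirmed enabled.
	 */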
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
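
/*
 * Pretty-print the state captured by intel_display_capture_error_state()
 * into the error state buffer, mirroring the register reads above.
 */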
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

#endif