/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
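
/*
 * Worked example for the divider formula above (illustrative values only):
 * with an HPLL VCO of 1600000 kHz and a CCK divider field of 7, the result
 * is DIV_ROUND_CLOSEST(1600000 * 2, 7 + 1) = 400000 kHz.
 */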
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

/* units of 100MHz */
static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}
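
/*
 * p2.dot_limit in the tables below is the pixel clock threshold used by
 * i9xx_select_p2_div() further down: targets below it get p2_slow, targets
 * at or above it get p2_fast (LVDS instead selects on single vs. dual
 * channel). E.g. with the i8xx DAC limits, a 100000 kHz target uses p2 = 4
 * while a 200000 kHz target uses p2 = 2.
 */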
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100MHz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
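
/*
 * On VLV/CHV the fast (data rate) clock is 5x the pixel clock, hence the
 * "* 5" in the vlv/chv .dot limits and the "target *= 5" in the vlv/chv
 * DPLL search functions further down.
 */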
static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
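
/*
 * Worked example for the i9xx formulas below (illustrative numbers only,
 * not taken from any real mode): refclk = 96000 kHz, m1 = 10, m2 = 5,
 * n = 2, p1 = 2, p2 = 10 gives m = 5 * (10 + 2) + (5 + 2) = 67,
 * vco = 96000 * 67 / (2 + 2) = 1608000 kHz and
 * dot = 1608000 / (2 * 10) = 80400 kHz.
 */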
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
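
/*
 * Note that on CHV m2 is a fixed point number with 22 fractional bits,
 * which is why chv_calc_dpll_params() divides by n << 22 above. Sketch
 * (not a validated configuration): refclk = 19200 kHz, n = 1, m1 = 2,
 * m2 = 140 << 22 gives vco = 19200 * 2 * 140 = 5376000 kHz.
 */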
/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		return false;
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		return false;
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		return false;
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		return false;

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			return false;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			return false;
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			return false;
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		return false;
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		return false;

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the Pineview clock
 * equation: refclk * (m2 + 2) / n / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
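
/*
 * pnv_find_best_dpll() above is i9xx_find_best_dpll() minus the m2 < m1
 * constraint (Pineview has a single combined m divider, so m1 is unused)
 * and with the Pineview clock equation.
 */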
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check whether the calculated PLL configuration is more optimal than the
 * best configuration and error found so far. Returns the result of that
 * check and fills *error_ppm with the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (drm_WARN_ON_ONCE(dev, !target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
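
/*
 * Example: for target_freq = 100000 kHz and a calculated dot clock of
 * 100050 kHz, the error above works out to 1000000 * 50 / 100000 = 500 ppm.
 */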
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * fast clock = refclk * (m1 * m2) / n / p1 / p2; the pipe (dot) clock is
 * the fast clock divided by 5.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * fast clock = refclk * (m1 * m2 >> 22) / n / p1 / p2 (m2 carries 22
 * fractional bits); the pipe (dot) clock is the fast clock divided by 5.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 always
	 * to 2. If we ever need to support a 200 MHz refclk, we need to
	 * revisit this because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX / clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}
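
/*
 * BXT reuses the CHV search above: same equation, but with the limits from
 * intel_limits_bxt and a fixed 100 MHz reference clock.
 */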
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = intel_de_read(dev_priv, reg) & line_mask;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
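
/*
 * On gen4+ PIPECONF has a hardware "pipe active" status bit we can poll
 * directly (see above); older platforms lack it, so we instead sample
 * PIPEDSL twice (pipe_scanline_is_moving()) to infer whether the pipe
 * has stopped.
 */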
/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
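
/*
 * On these platforms the DPLL can be write protected while the panel power
 * sequencer registers are locked; the PLL enable paths below call
 * assert_panel_unlocked() to verify we can actually write it.
 */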
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
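
/*
 * A PCH transcoder must only be disabled once every port feeding from it
 * is off; ilk_disable_pch_transcoder() further down asserts exactly that
 * via assert_pch_ports_disabled().
 */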
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}


static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Re-enable the 10bit clock to the display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
1543 */ 1544 intel_de_write(dev_priv, reg, dpll); 1545 } 1546 1547 /* We do this three times for luck */ 1548 for (i = 0; i < 3; i++) { 1549 intel_de_write(dev_priv, reg, dpll); 1550 intel_de_posting_read(dev_priv, reg); 1551 udelay(150); /* wait for warmup */ 1552 } 1553 } 1554 1555 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) 1556 { 1557 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1558 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1559 enum pipe pipe = crtc->pipe; 1560 1561 /* Don't disable pipe or pipe PLLs if needed */ 1562 if (IS_I830(dev_priv)) 1563 return; 1564 1565 /* Make sure the pipe isn't still relying on us */ 1566 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); 1567 1568 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); 1569 intel_de_posting_read(dev_priv, DPLL(pipe)); 1570 } 1571 1572 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1573 { 1574 u32 val; 1575 1576 /* Make sure the pipe isn't still relying on us */ 1577 assert_pipe_disabled(dev_priv, (enum transcoder)pipe); 1578 1579 val = DPLL_INTEGRATED_REF_CLK_VLV | 1580 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1581 if (pipe != PIPE_A) 1582 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1583 1584 intel_de_write(dev_priv, DPLL(pipe), val); 1585 intel_de_posting_read(dev_priv, DPLL(pipe)); 1586 } 1587 1588 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1589 { 1590 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1591 u32 val; 1592 1593 /* Make sure the pipe isn't still relying on us */ 1594 assert_pipe_disabled(dev_priv, (enum transcoder)pipe); 1595 1596 val = DPLL_SSC_REF_CLK_CHV | 1597 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1598 if (pipe != PIPE_A) 1599 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1600 1601 intel_de_write(dev_priv, DPLL(pipe), val); 1602 intel_de_posting_read(dev_priv, DPLL(pipe)); 1603 1604 vlv_dpio_get(dev_priv); 1605 1606 /* Disable 10bit clock to display controller */ 1607 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1608 val &= ~DPIO_DCLKP_EN; 1609 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); 1610 1611 vlv_dpio_put(dev_priv); 1612 } 1613 1614 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1615 struct intel_digital_port *dig_port, 1616 unsigned int expected_mask) 1617 { 1618 u32 port_mask; 1619 i915_reg_t dpll_reg; 1620 1621 switch (dig_port->base.port) { 1622 case PORT_B: 1623 port_mask = DPLL_PORTB_READY_MASK; 1624 dpll_reg = DPLL(0); 1625 break; 1626 case PORT_C: 1627 port_mask = DPLL_PORTC_READY_MASK; 1628 dpll_reg = DPLL(0); 1629 expected_mask <<= 4; 1630 break; 1631 case PORT_D: 1632 port_mask = DPLL_PORTD_READY_MASK; 1633 dpll_reg = DPIO_PHY_STATUS; 1634 break; 1635 default: 1636 BUG(); 1637 } 1638 1639 if (intel_de_wait_for_register(dev_priv, dpll_reg, 1640 port_mask, expected_mask, 1000)) 1641 drm_WARN(&dev_priv->drm, 1, 1642 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", 1643 dig_port->base.base.base.id, dig_port->base.base.name, 1644 intel_de_read(dev_priv, dpll_reg) & port_mask, 1645 expected_mask); 1646 } 1647 1648 static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) 1649 { 1650 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1651 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1652 enum pipe pipe = crtc->pipe; 1653 i915_reg_t reg; 1654 u32 val, pipeconf_val; 1655 1656 /* Make sure PCH DPLL is enabled */ 1657 
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
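
/*
 * LPT has a single PCH transcoder that is always fed from pipe A (see
 * intel_crtc_pch_transcoder() further down), so the LPT helpers below
 * hardcode PIPE_A and LPT_TRANSCONF instead of taking a pipe.
 */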
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}

static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (INTEL_GEN(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}

void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}

void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}

void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	return IS_GEN(dev_priv, 2) ? 2048 : 4096;
}

static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
{
	if (!is_ccs_modifier(fb->modifier))
		return false;

	return plane >= fb->format->num_planes / 2;
}

static bool is_gen12_ccs_modifier(u64 modifier)
{
	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
}

static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
{
	return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
}

static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
{
	if (is_ccs_modifier(fb->modifier))
		return is_ccs_plane(fb, plane);

	return plane == 1;
}

static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
{
	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
		    (main_plane && main_plane >= fb->format->num_planes / 2));

	return fb->format->num_planes / 2 + main_plane;
}

static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
{
	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
		    ccs_plane < fb->format->num_planes / 2);

	return ccs_plane - fb->format->num_planes / 2;
}

/* Return either the main plane's CCS or - if not a CCS FB - UV plane */
int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
{
	if (is_ccs_modifier(fb->modifier))
		return main_to_ccs_plane(fb, main_plane);

	return 1;
}

bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
				    uint64_t modifier)
{
	return info->is_yuv &&
	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}
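
/*
 * Illustrative note (a sketch, not from Bspec): with the convention above,
 * a CCS framebuffer carries one AUX plane per main plane, stored in the
 * second half of fb->format->num_planes. E.g. for a gen12 MC CCS NV12 fb
 * (num_planes == 4, see gen12_ccs_formats below): plane 0 = Y, plane 1 = UV,
 * plane 2 = Y's CCS (main_to_ccs_plane(fb, 0) == 4 / 2 + 0 == 2) and
 * plane 3 = UV's CCS. A non-CCS semiplanar fb simply has plane 1 as the UV
 * plane, which is what intel_main_to_aux_plane() falls back to.
 */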
static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
				   int color_plane)
{
	return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
	       color_plane == 1;
}

static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}

static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
{
	if (is_gen12_ccs_plane(fb, color_plane))
		return 1;

	return intel_tile_size(to_i915(fb->dev)) /
		intel_tile_width_bytes(fb, color_plane);
}

/* Return the tile dimensions in pixel units */
static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
			    unsigned int *tile_width,
			    unsigned int *tile_height)
{
	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
	unsigned int cpp = fb->format->cpp[color_plane];

	*tile_width = tile_width_bytes / cpp;
	*tile_height = intel_tile_height(fb, color_plane);
}

static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
					int color_plane)
{
	unsigned int tile_width, tile_height;

	intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

	return fb->pitches[color_plane] * tile_height;
}

unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	unsigned int tile_height = intel_tile_height(fb, color_plane);

	return ALIGN(height, tile_height);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].width * rot_info->plane[i].height;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++)
		size += rem_info->plane[i].width * rem_info->plane[i].height;

	return size;
}
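
/*
 * Worked example for the tile helpers above (illustrative numbers): on
 * non-gen2 hardware intel_tile_size() is 4096 bytes. For an XRGB8888
 * (cpp == 4) Y-tiled fb on hardware with 128 byte Y tiling,
 * intel_tile_width_bytes() is 128, so intel_tile_height() is
 * 4096 / 128 == 32 rows, and intel_tile_dims() yields a
 * 128 / 4 == 32 x 32 pixel tile. The same fb X-tiled uses 512 byte
 * tile rows instead: an 8 row tall, 128 x 8 pixel tile.
 */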
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
			const struct drm_framebuffer *fb,
			unsigned int rotation)
{
	view->type = I915_GGTT_VIEW_NORMAL;
	if (drm_rotation_90_or_270(rotation)) {
		view->type = I915_GGTT_VIEW_ROTATED;
		view->rotated = to_intel_framebuffer(fb)->rot_info;
	}
}

static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;
	else if (IS_I85X(dev_priv))
		return 256;
	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;
	else
		return 4 * 1024;
}

static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;
	else
		return 0;
}

static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}

static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return INTEL_GEN(dev_priv) < 4 ||
	       (plane->has_fbc &&
		plane_state->view.type == I915_GGTT_VIEW_NORMAL);
}

struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually
	 * forward writes to a chunk of system memory, which means that
	 * there is no risk that the register values disappear as soon as
	 * we call intel_runtime_pm_put(), so it is correct to wrap only
	 * the pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Let's presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression. For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}

void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}

static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
			  unsigned int rotation)
{
	if (drm_rotation_90_or_270(rotation))
		return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
	else
		return fb->pitches[color_plane];
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->color_plane[color_plane].stride;

	return y * pitch + x * cpp;
}
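
/*
 * Worked example (hypothetical numbers): for an XRGB8888 fb whose plane 0
 * is 3840 pixels wide, the stride is 3840 * 4 == 15360 bytes, so
 * intel_fb_xy_to_linear(100, 50, ...) yields
 * 50 * 15360 + 100 * 4 == 768400 bytes from the start of the gtt mapping.
 */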
/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->color_plane[color_plane].x;
	*y += state->color_plane[color_plane].y;
}

static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}

static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
{
	return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
	       is_gen12_ccs_plane(fb, color_plane);
}

static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}

/*
 * Adjust the tile offset by moving the difference into
 * the x/y offsets.
 */
static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
					     const struct intel_plane_state *state,
					     int color_plane,
					     u32 old_offset, u32 new_offset)
{
	return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
					   state->hw.rotation,
					   state->color_plane[color_plane].stride,
					   old_offset, new_offset);
}
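
/*
 * Worked example for intel_adjust_tile_offset() (illustrative numbers):
 * with 32x32 pixel / 4096 byte tiles and a pitch of 120 tiles, rebasing
 * from old_offset == 121 * 4096 to new_offset == 0 folds 121 tiles into
 * the coordinates: y grows by 121 / 120 * 32 == 32 rows and x by
 * 121 % 120 * 32 == 32 pixels (x would additionally be folded back into
 * y if it exceeded the 120 * 32 == 3840 pixel row).
 */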
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. Bytes per pixel are assumed to be a power of two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			offset_aligned = rounddown(offset_aligned, alignment);
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			*y = *x = 0;
		}
	}

	return offset_aligned;
}

static u32 intel_plane_compute_aligned_offset(int *x, int *y,
					      const struct intel_plane_state *state,
					      int color_plane)
{
	struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int rotation = state->hw.rotation;
	int pitch = state->color_plane[color_plane].stride;
	u32 alignment;

	if (intel_plane->id == PLANE_CURSOR)
		alignment = intel_cursor_alignment(dev_priv);
	else
		alignment = intel_surf_alignment(fb, color_plane);

	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
					    pitch, rotation, alignment);
}

/* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}

static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case I915_FORMAT_MOD_X_TILED:
		return I915_TILING_X;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return I915_TILING_Y;
	default:
		return I915_TILING_NONE;
	}
}

/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 * the cache-line pairs. The compression state of the cache-line pair
 * is specified by 2 bits in the CCS. Each CCS cache-line represents
 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 * cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refer to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
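
/*
 * Worked example (illustrative numbers): with the 1 CCS byte per 8x16
 * main surface pixels ratio above (hsub == 8, vsub == 16,
 * cpp == { 4, 1 }), a 3840x2160 XRGB8888 fb needs a CCS plane of
 * (3840 / 8) * (2160 / 16) == 480 * 135 == 64800 bytes.
 */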
/*
 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
 * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels
 * in the main surface.
 */
static const struct drm_format_info gen12_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

static const struct drm_format_info *
intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	switch (cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return lookup_format_info(skl_ccs_formats,
					  ARRAY_SIZE(skl_ccs_formats),
					  cmd->pixel_format);
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return lookup_format_info(gen12_ccs_formats,
					  ARRAY_SIZE(gen12_ccs_formats),
					  cmd->pixel_format);
	default:
		return NULL;
	}
}

bool is_ccs_modifier(u64 modifier)
{
	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
	       modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
}
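
/*
 * Worked example (illustrative numbers): at 1 CCS byte per 2x32 main
 * surface pixels, one 32x32 pixel / 4 KiB Y-tile of a 4 byte per pixel
 * main surface compresses to 32x32 / (2x32) == 16 bytes of CCS data,
 * which is how four Y-tiles end up sharing one 64 byte CCS cache line.
 * For the gen12_ccs_aux_stride() helper below, a main surface pitch of
 * 15360 bytes (3840 XRGB8888 pixels) gives an AUX pitch of
 * DIV_ROUND_UP(15360, 512) * 64 == 30 * 64 == 1920 bytes.
 */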
static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
{
	return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
			    512) * 64;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * if pipe A is disabled, use the first pipe from pipe_mask instead.
	 */
	crtc = intel_get_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

static
u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
			u32 pixel_format, u64 modifier)
{
	/*
	 * Arbitrary limit for gen4+ chosen to match the
	 * render engine max stride.
	 *
	 * The new CCS hash mode makes remapping impossible.
	 */
	if (!is_ccs_modifier(modifier)) {
		if (INTEL_GEN(dev_priv) >= 7)
			return 256*1024;
		else if (INTEL_GEN(dev_priv) >= 4)
			return 128*1024;
	}

	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
}

static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
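
/*
 * Worked example for intel_fb_stride_alignment() (illustrative numbers):
 * a gen9 Y-tiled CCS fb normally needs its pitch aligned to the 128 byte
 * tile width, but once fb->width exceeds 3840 (Display WA #0531) the main
 * surface pitch must be a multiple of 4 * 128 == 512 bytes; gen12+ CCS
 * applies the same "four tile widths" padding unconditionally.
 */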
bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int i;

	/* We don't want to deal with remapping with cursors */
	if (plane->id == PLANE_CURSOR)
		return false;

	/*
	 * The display engine limits already match/exceed the
	 * render engine limits, so not much point in remapping.
	 * Would also need to deal with the fence POT alignment
	 * and gen2 2KiB GTT tile size.
	 */
	if (INTEL_GEN(dev_priv) < 4)
		return false;

	/*
	 * The new CCS hash mode isn't compatible with remapping as
	 * the virtual address of the pages affects the compressed data.
	 */
	if (is_ccs_modifier(fb->modifier))
		return false;

	/* Linear needs a page aligned stride for remapping */
	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
		unsigned int alignment = intel_tile_size(dev_priv) - 1;

		for (i = 0; i < fb->format->num_planes; i++) {
			if (fb->pitches[i] & alignment)
				return false;
		}
	}

	return true;
}

static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 stride, max_stride;

	/*
	 * No remapping for invisible planes since we don't have
	 * an actual source viewport to remap.
	 */
	if (!plane_state->uapi.visible)
		return false;

	if (!intel_plane_can_remap(plane_state))
		return false;

	/*
	 * FIXME: aux plane limits on gen9+ are
	 * unclear in Bspec, for now no checking.
	 */
	stride = intel_fb_pitch(fb, 0, rotation);
	max_stride = plane->max_stride(plane, fb->format->format,
				       fb->modifier, rotation);

	return stride > max_stride;
}

static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduce the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	main_plane = ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	*vsub = 32;
}

static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
	struct drm_i915_private *i915 = to_i915(fb->dev);
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	int main_plane;
	int hsub, vsub;
	int tile_width, tile_height;
	int ccs_x, ccs_y;
	int main_x, main_y;

	if (!is_ccs_plane(fb, ccs_plane))
		return 0;

	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

	tile_width *= hsub;
	tile_height *= vsub;

	ccs_x = (x * hsub) % tile_width;
	ccs_y = (y * vsub) % tile_height;

	main_plane = ccs_to_main_plane(fb, ccs_plane);
	main_x = intel_fb->normal[main_plane].x % tile_width;
	main_y = intel_fb->normal[main_plane].y % tile_height;

	/*
	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
	 * x/y offsets must match between CCS and the main surface.
	 */
	if (main_x != ccs_x || main_y != ccs_y) {
		drm_dbg_kms(&i915->drm,
			    "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			    main_x, main_y,
			    ccs_x, ccs_y,
			    intel_fb->normal[main_plane].x,
			    intel_fb->normal[main_plane].y,
			    x, y);
		return -EINVAL;
	}

	return 0;
}

static void
intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
{
	int main_plane = is_ccs_plane(fb, color_plane) ?
			 ccs_to_main_plane(fb, color_plane) : 0;
	int main_hsub, main_vsub;
	int hsub, vsub;

	intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
	*w = fb->width / main_hsub / hsub;
	*h = fb->height / main_vsub / vsub;
}

/*
 * Set up the rotated view for an FB plane and return the size the GTT mapping
 * requires for this view.
 */
static u32
setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
		  u32 gtt_offset_rotated, int x, int y,
		  unsigned int width, unsigned int height,
		  unsigned int tile_size,
		  unsigned int tile_width, unsigned int tile_height,
		  struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	unsigned int pitch_tiles;
	struct drm_rect r;

	/* Y or Yf modifiers required for 90/270 rotation */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 0;

	if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
		return 0;

	rot_info->plane[plane] = *plane_info;

	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;

	/* rotate the x/y offsets to match the GTT view */
	drm_rect_init(&r, x, y, width, height);
	drm_rect_rotate(&r,
			plane_info->width * tile_width,
			plane_info->height * tile_height,
			DRM_MODE_ROTATE_270);
	x = r.x1;
	y = r.y1;

	/* rotate the tile dimensions to match the GTT view */
	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
	swap(tile_width, tile_height);

	/*
	 * We only keep the x/y offsets, so push all of the
	 * gtt offset into the x/y offsets.
	 */
	intel_adjust_tile_offset(&x, &y,
				 tile_width, tile_height,
				 tile_size, pitch_tiles,
				 gtt_offset_rotated * tile_size, 0);

	/*
	 * First pixel of the framebuffer from
	 * the start of the rotated gtt mapping.
	 */
	intel_fb->rotated[plane].x = x;
	intel_fb->rotated[plane].y = y;

	return plane_info->width * plane_info->height;
}

static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		intel_fb_plane_dims(&width, &height, fb, i);

		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return ret;
		}

		ret = intel_fb_check_ccs_xy(fb, i, x, y);
		if (ret)
			return ret;

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb, i)) {
			struct intel_remapped_plane_info plane_info;
			unsigned int tile_width, tile_height;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			plane_info.offset = offset;
			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
							 tile_width * cpp);
			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
			plane_info.height = DIV_ROUND_UP(y + height,
							 tile_height);

			/* how many tiles does this plane need */
			size = plane_info.stride * plane_info.height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			gtt_offset_rotated +=
				setup_fb_rotation(i, &plane_info,
						  gtt_offset_rotated,
						  x, y, width, height,
						  tile_size,
						  tile_width, tile_height,
						  fb);
		} else {
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total are needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		drm_dbg_kms(&dev_priv->drm,
			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
			    mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
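
/*
 * Worked example for the size bookkeeping above (illustrative numbers):
 * a linear 1920x1080 XRGB8888 fb with a 7680 byte pitch and x == y == 0
 * occupies DIV_ROUND_UP(1080 * 7680, 4096) == 2025 pages, while the same
 * fb Y-tiled (32x32 pixel tiles) needs
 * DIV_ROUND_UP(7680, 128) * DIV_ROUND_UP(1080, 32) == 60 * 34 == 2040
 * tiles, plus one more if x leaves the plane tile misaligned.
 */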
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
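
/*
 * Worked example for intel_plane_remap_gtt() (illustrative numbers):
 * remapping a Y-tiled XRGB8888 plane with a 7680 byte pitch and a
 * 1920x1080 src viewport at (0,0) yields
 * info->plane[0].width == DIV_ROUND_UP(1920, 32) == 60 tiles and
 * info->plane[0].height == DIV_ROUND_UP(1080, 32) == 34 tiles; in the
 * unrotated (REMAPPED) case the new color plane stride becomes
 * 60 * 32 * 4 == 7680 bytes again, while in the 90/270 case it is
 * 34 * 32 == 1088 (the tile aligned height, in pixels).
 */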
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->hw.fb);
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes;

	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( Can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}

static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRA555:
		return DRM_FORMAT_ARGB1555;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRA888:
		return DRM_FORMAT_ARGB8888;
	case DISPPLANE_RGBA888:
		return DRM_FORMAT_ABGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	case DISPPLANE_BGRA101010:
		return DRM_FORMAT_ARGB2101010;
	case DISPPLANE_RGBA101010:
		return DRM_FORMAT_ABGR2101010;
	case DISPPLANE_RGBX161616:
		return DRM_FORMAT_XBGR16161616F;
	}
}

int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	case PLANE_CTL_FORMAT_NV12:
		return DRM_FORMAT_NV12;
	case PLANE_CTL_FORMAT_XYUV:
		return DRM_FORMAT_XYUV8888;
	case PLANE_CTL_FORMAT_P010:
		return DRM_FORMAT_P010;
	case PLANE_CTL_FORMAT_P012:
		return DRM_FORMAT_P012;
	case PLANE_CTL_FORMAT_P016:
		return DRM_FORMAT_P016;
	case PLANE_CTL_FORMAT_Y210:
		return DRM_FORMAT_Y210;
	case PLANE_CTL_FORMAT_Y212:
		return DRM_FORMAT_Y212;
	case PLANE_CTL_FORMAT_Y216:
		return DRM_FORMAT_Y216;
	case PLANE_CTL_FORMAT_Y410:
		return DRM_FORMAT_XVYU2101010;
	case PLANE_CTL_FORMAT_Y412:
		return DRM_FORMAT_XVYU12_16161616;
	case PLANE_CTL_FORMAT_Y416:
		return DRM_FORMAT_XVYU16161616;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR2101010;
			else
				return DRM_FORMAT_XBGR2101010;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB2101010;
			else
				return DRM_FORMAT_XRGB2101010;
		}
	case PLANE_CTL_FORMAT_XRGB_16161616F:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR16161616F;
			else
				return DRM_FORMAT_XBGR16161616F;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB16161616F;
			else
				return DRM_FORMAT_XRGB16161616F;
		}
	}
}
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}

static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	struct i915_vma *vma;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	vma = initial_plane_vma(dev_priv, plane_config);
	if (!vma)
		return false;

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb),
				   vma->obj, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto err_vma;
	}

	plane_config->vma = vma;
	return true;

err_vma:
	i915_vma_put(vma);
	return false;
}

static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			struct intel_plane_state *plane_state,
			bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
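
/*
 * Illustrative note (a sketch): intel_set_plane_visible() above keeps the
 * uapi plane_mask in sync using drm_plane_mask(), i.e. unique per-device
 * plane ids, while active_planes below is indexed by the per-pipe
 * enum plane_id (PLANE_PRIMARY, PLANE_CURSOR, ...). That is why
 * fixup_active_planes() can rebuild active_planes from plane_mask even
 * when the BIOS left several "primary" planes pointing at the same pipe.
 */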
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask)
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}

static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}

static struct intel_frontbuffer *
to_intel_frontbuffer(struct drm_framebuffer *fb)
{
	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}
static struct intel_frontbuffer *
to_intel_frontbuffer(struct drm_framebuffer *fb)
{
	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}

static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}

static int skl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		/*
		 * Validated limit is 4k, but 5k should work apart
		 * from the following features:
		 * - Ytile (already limited to 4k)
		 * - FP16 (already limited to 4k)
		 * - render compression (already limited to 4k)
		 * - KVMR sprite and cursor (don't care)
		 * - horizontal panning (TODO verify this)
		 * - pipe and plane scaling (TODO verify this)
		 */
		if (cpp == 8)
			return 4096;
		else
			return 5120;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* FIXME AUX plane? */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (cpp == 8)
			return 2048;
		else
			return 4096;
	default:
		MISSING_CASE(fb->modifier);
		return 2048;
	}
}

static int glk_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		if (cpp == 8)
			return 4096;
		else
			return 5120;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* FIXME AUX plane? */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (cpp == 8)
			return 2048;
		else
			return 5120;
	default:
		MISSING_CASE(fb->modifier);
		return 2048;
	}
}

static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}

static int skl_max_plane_height(void)
{
	return 4096;
}

static int icl_max_plane_height(void)
{
	return 4320;
}

static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset - alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->color_plane[0].offset, 0);

	return y;
}

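/*
 * The surface checks below share a common pattern: compute the highest
 * properly aligned surface offset for the requested x/y, then walk the
 * offset down one alignment step at a time (adjusting x/y to compensate)
 * until the remaining constraints (X-tile stride limit, CCS main/AUX
 * coordinate match) are satisfied, giving up once the offset reaches 0.
 */
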
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int max_width;
	int max_height;
	u32 alignment;
	u32 offset;
	int aux_plane = intel_main_to_aux_plane(fb, 0);
	u32 aux_offset = plane_state->color_plane[aux_plane].offset;

	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || h > max_height) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
		return -EINVAL;

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				drm_dbg_kms(&dev_priv->drm,
					    "Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, aux_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[aux_plane].x ||
		    y != plane_state->color_plane[aux_plane].y) {
			drm_dbg_kms(&dev_priv->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}

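/*
 * Note on the ">> 17" shifts below: the uapi src rectangle is in 16.16
 * fixed point, so ">> 16" yields integer luma coordinates; shifting by
 * one extra bit additionally divides by two, converting directly to the
 * 2x2-subsampled chroma (CbCr) plane's coordinate space.
 */
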
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int uv_plane = 1;
	int max_width = skl_max_plane_width(fb, uv_plane, rotation);
	int max_height = 4096;
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
	offset = intel_plane_compute_aligned_offset(&x, &y,
						    plane_state, uv_plane);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		drm_dbg_kms(&i915->drm,
			    "CbCr source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	if (is_ccs_modifier(fb->modifier)) {
		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
		int aux_offset = plane_state->color_plane[ccs_plane].offset;
		int alignment = intel_surf_alignment(fb, uv_plane);

		if (offset > aux_offset)
			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset,
								   aux_offset & ~(alignment - 1));

		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, ccs_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[ccs_plane].x ||
		    y != plane_state->color_plane[ccs_plane].y) {
			drm_dbg_kms(&i915->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;

	return 0;
}

static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	u32 offset;
	int ccs_plane;

	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
		int main_hsub, main_vsub;
		int hsub, vsub;
		int x, y;

		if (!is_ccs_plane(fb, ccs_plane))
			continue;

		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
					       ccs_to_main_plane(fb, ccs_plane));
		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

		hsub *= main_hsub;
		vsub *= main_vsub;
		x = src_x / hsub;
		y = src_y / vsub;

		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);

		offset = intel_plane_compute_aligned_offset(&x, &y,
							    plane_state,
							    ccs_plane);

		plane_state->color_plane[ccs_plane].offset = offset;
		plane_state->color_plane[ccs_plane].x = (x * hsub + src_x % hsub) / main_hsub;
		plane_state->color_plane[ccs_plane].y = (y * vsub + src_y % vsub) / main_vsub;
	}

	return 0;
}

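/*
 * Note: in skl_check_plane_surface() below, color planes that end up
 * unused get an offset of ~0xfff - presumably a deliberately invalid
 * (non-page-aligned, all-ones) marker so that stray use of those planes
 * is easy to spot, though the code does not say so explicitly.
 */
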
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	bool needs_aux = false;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	/*
	 * Handle the AUX surface first since the main surface setup depends on
	 * it.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		needs_aux = true;
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	if (intel_format_info_is_yuv_semiplanar(fb->format,
						fb->modifier)) {
		needs_aux = true;
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	if (!needs_aux) {
		int i;

		for (i = 1; i < fb->format->num_planes; i++) {
			plane_state->color_plane[i].offset = ~0xfff;
			plane_state->color_plane[i].x = 0;
			plane_state->color_plane[i].y = 0;
		}
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}

static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
			     const struct intel_plane_state *plane_state,
			     unsigned int *num, unsigned int *den)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int cpp = fb->format->cpp[0];

	/*
	 * g4x bspec says 64bpp pixel rate can't exceed 80%
	 * of cdclk when the sprite plane is enabled on the
	 * same pipe. ilk/snb bspec says 64bpp pixel rate is
	 * never allowed to exceed 80% of cdclk. Let's just go
	 * with the ilk/snb limit always.
	 */
	if (cpp == 8) {
		*num = 10;
		*den = 8;
	} else {
		*num = 1;
		*den = 1;
	}
}

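/*
 * Worked example for the ratio above: a 64bpp (cpp == 8) fp16 plane
 * yields num/den = 10/8, i.e. the required cdclk is pixel_rate * 1.25,
 * which is just the "pixel rate must not exceed 80% of cdclk" rule
 * rearranged. All other formats use 1/1.
 */
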
static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
				const struct intel_plane_state *plane_state)
{
	unsigned int pixel_rate;
	unsigned int num, den;

	/*
	 * Note that crtc_state->pixel_rate accounts for both
	 * horizontal and vertical panel fitter downscaling factors.
	 * Pre-HSW bspec tells us to only consider the horizontal
	 * downscaling factor here. We ignore that and just consider
	 * both for simplicity.
	 */
	pixel_rate = crtc_state->pixel_rate;

	i9xx_plane_ratio(crtc_state, plane_state, &num, &den);

	/* two pixels per clock with double wide pipe */
	if (crtc_state->double_wide)
		den *= 2;

	return DIV_ROUND_UP(pixel_rate * num, den);
}

unsigned int
i9xx_plane_max_stride(struct intel_plane *plane,
		      u32 pixel_format, u64 modifier,
		      unsigned int rotation)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	if (!HAS_GMCH(dev_priv)) {
		return 32*1024;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		if (modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		if (plane->i9xx_plane == PLANE_C)
			return 4*1024;
		else
			return 8*1024;
	}
}

static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dspcntr = 0;

	if (crtc_state->gamma_enable)
		dspcntr |= DISPPLANE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	return dspcntr;
}

static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}

int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x, src_y, src_w;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	/* Undocumented hardware limit on i965/g4x/vlv/chv */
	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
		return -EINVAL;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->hw.rotation;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}

static bool i9xx_plane_has_windowing(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;

	if (IS_CHERRYVIEW(dev_priv))
		return i9xx_plane == PLANE_B;
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return false;
	else if (IS_GEN(dev_priv, 4))
		return i9xx_plane == PLANE_C;
	else
		return i9xx_plane == PLANE_B ||
			i9xx_plane == PLANE_C;
}

static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}

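/*
 * Programming model used by i9xx_update_plane() below: gen2/3 scan out
 * from DSPADDR using a byte-granular linear offset, while gen4+ split
 * the address into a page-aligned DSPSURF base plus DSPLINOFF/DSPTILEOFF
 * adjustments (or a single DSPOFFSET on HSW/BDW). The surface address
 * register is written last because that write arms the double-buffered
 * update.
 */
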
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
			  plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
		intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
				  (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
				  linear_offset);
		intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
				  (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * This function detaches (aka. unbinds) unused scalers in hardware
 */
static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	int i;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}

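/*
 * Worked stride example for the helpers below (illustrative numbers):
 * an X-tiled XRGB8888 fb with a 16384 byte pitch has a 512 byte tile
 * row, so skl_plane_stride() programs 16384 / 512 = 32 tiles; the same
 * fb kept linear would be programmed in 64 byte chunks as
 * 16384 / 64 = 256.
 */
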
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	/*
	 * The stride is expressed either in chunks of 64 bytes for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	if (is_surface_linear(fb, color_plane))
		return 64;
	else if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);
	else
		return intel_tile_width_bytes(fb, color_plane);
}

u32 skl_plane_stride(const struct intel_plane_state *plane_state,
		     int color_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 stride = plane_state->color_plane[color_plane].stride;

	if (color_plane >= fb->format->num_planes)
		return 0;

	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}

static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_XYUV8888:
		return PLANE_CTL_FORMAT_XYUV;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}

static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->hw.fb->format->has_alpha)
		return PLANE_CTL_ALPHA_DISABLE;

	switch (plane_state->hw.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_CTL_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->hw.pixel_blend_mode);
		return PLANE_CTL_ALPHA_DISABLE;
	}
}

static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->hw.fb->format->has_alpha)
		return PLANE_COLOR_ALPHA_DISABLE;

	switch (plane_state->hw.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_COLOR_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->hw.pixel_blend_mode);
		return PLANE_COLOR_ALPHA_DISABLE;
	}
}

static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with
	 * Xrandr, while i915 HW rotation is clockwise; that's why the
	 * 90/270 values are swapped here.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}

static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}

u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 plane_ctl = 0;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		return plane_ctl;

	if (crtc_state->gamma_enable)
		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;

	return plane_ctl;
}

u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}

u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 plane_color_ctl = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		return plane_color_ctl;

	if (crtc_state->gamma_enable)
		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;

	return plane_color_ctl;
}

u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		switch (plane_state->hw.color_encoding) {
		case DRM_COLOR_YCBCR_BT709:
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
			break;
		case DRM_COLOR_YCBCR_BT2020:
			plane_color_ctl |=
				PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
			break;
		default:
			plane_color_ctl |=
				PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
		}
		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}

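/*
 * GPU reset vs. display: on platforms where a GPU reset also clobbers
 * the display (or when force_reset_modeset_test is set),
 * intel_prepare_reset() duplicates the current atomic state and
 * disables all CRTCs under the modeset locks; after the reset,
 * intel_finish_reset() re-initializes the hardware as needed and
 * replays the saved state through __intel_display_resume() below.
 */
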
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}

static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
		intel_has_gpu_reset(&dev_priv->gt));
}

void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}

void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}

static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

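/*
 * FDI link training at a glance: the CPU-side TX and PCH-side RX are
 * first put into training pattern 1 and the RX IIR is polled for bit
 * lock, then both switch to training pattern 2 and the IIR is polled
 * for symbol lock. Only after both locks are reported does
 * intel_fdi_normal_train() above move the link to the normal pixel
 * stream.
 */
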
/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	 * for train result
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}

static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

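/*
 * The four snb_b_fdi_train_param[] entries above are the voltage-swing /
 * pre-emphasis combinations the SNB and IVB training loops step through,
 * from weakest to strongest drive settings, retrying each pattern until
 * the receiver reports lock.
 */
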
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	 * for train result
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	 * for train result
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

*/ 5500 intel_de_posting_read(dev_priv, reg); 5501 udelay(100); 5502 } 5503 5504 static void ilk_fdi_disable(struct intel_crtc *crtc) 5505 { 5506 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5507 enum pipe pipe = crtc->pipe; 5508 i915_reg_t reg; 5509 u32 temp; 5510 5511 /* disable CPU FDI tx and PCH FDI rx */ 5512 reg = FDI_TX_CTL(pipe); 5513 temp = intel_de_read(dev_priv, reg); 5514 intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE); 5515 intel_de_posting_read(dev_priv, reg); 5516 5517 reg = FDI_RX_CTL(pipe); 5518 temp = intel_de_read(dev_priv, reg); 5519 temp &= ~(0x7 << 16); 5520 temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 5521 intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE); 5522 5523 intel_de_posting_read(dev_priv, reg); 5524 udelay(100); 5525 5526 /* Ironlake workaround, disable clock pointer after downing FDI */ 5527 if (HAS_PCH_IBX(dev_priv)) 5528 intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe), 5529 FDI_RX_PHASE_SYNC_POINTER_OVR); 5530 5531 /* still set train pattern 1 */ 5532 reg = FDI_TX_CTL(pipe); 5533 temp = intel_de_read(dev_priv, reg); 5534 temp &= ~FDI_LINK_TRAIN_NONE; 5535 temp |= FDI_LINK_TRAIN_PATTERN_1; 5536 intel_de_write(dev_priv, reg, temp); 5537 5538 reg = FDI_RX_CTL(pipe); 5539 temp = intel_de_read(dev_priv, reg); 5540 if (HAS_PCH_CPT(dev_priv)) { 5541 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 5542 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 5543 } else { 5544 temp &= ~FDI_LINK_TRAIN_NONE; 5545 temp |= FDI_LINK_TRAIN_PATTERN_1; 5546 } 5547 /* BPC in FDI rx is consistent with that in PIPECONF */ 5548 temp &= ~(0x07 << 16); 5549 temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 5550 intel_de_write(dev_priv, reg, temp); 5551 5552 intel_de_posting_read(dev_priv, reg); 5553 udelay(100); 5554 } 5555 5556 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 5557 { 5558 struct drm_crtc *crtc; 5559 bool cleanup_done; 5560 5561 drm_for_each_crtc(crtc, &dev_priv->drm) { 5562 struct drm_crtc_commit *commit; 5563 spin_lock(&crtc->commit_lock); 5564 commit = list_first_entry_or_null(&crtc->commit_list, 5565 struct drm_crtc_commit, commit_entry); 5566 cleanup_done = commit ? 5567 try_wait_for_completion(&commit->cleanup_done) : true; 5568 spin_unlock(&crtc->commit_lock); 5569 5570 if (cleanup_done) 5571 continue; 5572 5573 drm_crtc_wait_one_vblank(crtc); 5574 5575 return true; 5576 } 5577 5578 return false; 5579 } 5580 5581 void lpt_disable_iclkip(struct drm_i915_private *dev_priv) 5582 { 5583 u32 temp; 5584 5585 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE); 5586 5587 mutex_lock(&dev_priv->sb_lock); 5588 5589 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5590 temp |= SBI_SSCCTL_DISABLE; 5591 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 5592 5593 mutex_unlock(&dev_priv->sb_lock); 5594 } 5595 5596 /* Program iCLKIP clock to the desired frequency */ 5597 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) 5598 { 5599 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5600 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5601 int clock = crtc_state->hw.adjusted_mode.crtc_clock; 5602 u32 divsel, phaseinc, auxdiv, phasedir = 0; 5603 u32 temp; 5604 5605 lpt_disable_iclkip(dev_priv); 5606 5607 /* The iCLK virtual clock root frequency is in MHz, 5608 * but the adjusted_mode->crtc_clock is in KHz.
To get the 5609 * divisors, it is necessary to divide one by another, so we 5610 * convert the virtual clock precision to KHz here for higher 5611 * precision. 5612 */ 5613 for (auxdiv = 0; auxdiv < 2; auxdiv++) { 5614 u32 iclk_virtual_root_freq = 172800 * 1000; 5615 u32 iclk_pi_range = 64; 5616 u32 desired_divisor; 5617 5618 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5619 clock << auxdiv); 5620 divsel = (desired_divisor / iclk_pi_range) - 2; 5621 phaseinc = desired_divisor % iclk_pi_range; 5622 5623 /* 5624 * Near 20MHz is a corner case which is 5625 * out of range for the 7-bit divisor 5626 */ 5627 if (divsel <= 0x7f) 5628 break; 5629 } 5630 5631 /* This should not happen with any sane values */ 5632 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 5633 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 5634 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) & 5635 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 5636 5637 drm_dbg_kms(&dev_priv->drm, 5638 "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 5639 clock, auxdiv, divsel, phasedir, phaseinc); 5640 5641 mutex_lock(&dev_priv->sb_lock); 5642 5643 /* Program SSCDIVINTPHASE6 */ 5644 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5645 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 5646 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 5647 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 5648 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 5649 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 5650 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 5651 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 5652 5653 /* Program SSCAUXDIV */ 5654 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5655 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 5656 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 5657 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 5658 5659 /* Enable modulator and associated divider */ 5660 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5661 temp &= ~SBI_SSCCTL_DISABLE; 5662 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 5663 5664 mutex_unlock(&dev_priv->sb_lock); 5665 5666 /* Wait for initialization time */ 5667 udelay(24); 5668 5669 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); 5670 } 5671 5672 int lpt_get_iclkip(struct drm_i915_private *dev_priv) 5673 { 5674 u32 divsel, phaseinc, auxdiv; 5675 u32 iclk_virtual_root_freq = 172800 * 1000; 5676 u32 iclk_pi_range = 64; 5677 u32 desired_divisor; 5678 u32 temp; 5679 5680 if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) 5681 return 0; 5682 5683 mutex_lock(&dev_priv->sb_lock); 5684 5685 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 5686 if (temp & SBI_SSCCTL_DISABLE) { 5687 mutex_unlock(&dev_priv->sb_lock); 5688 return 0; 5689 } 5690 5691 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 5692 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> 5693 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; 5694 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> 5695 SBI_SSCDIVINTPHASE_INCVAL_SHIFT; 5696 5697 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 5698 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> 5699 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; 5700 5701 mutex_unlock(&dev_priv->sb_lock); 5702 5703 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; 5704 5705 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 5706 desired_divisor << auxdiv); 5707 } 5708 5709 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, 
5710 enum pipe pch_transcoder) 5711 { 5712 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5713 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5714 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 5715 5716 intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), 5717 intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); 5718 intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), 5719 intel_de_read(dev_priv, HBLANK(cpu_transcoder))); 5720 intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), 5721 intel_de_read(dev_priv, HSYNC(cpu_transcoder))); 5722 5723 intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), 5724 intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); 5725 intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), 5726 intel_de_read(dev_priv, VBLANK(cpu_transcoder))); 5727 intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), 5728 intel_de_read(dev_priv, VSYNC(cpu_transcoder))); 5729 intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), 5730 intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); 5731 } 5732 5733 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) 5734 { 5735 u32 temp; 5736 5737 temp = intel_de_read(dev_priv, SOUTH_CHICKEN1); 5738 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 5739 return; 5740 5741 drm_WARN_ON(&dev_priv->drm, 5742 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) & 5743 FDI_RX_ENABLE); 5744 drm_WARN_ON(&dev_priv->drm, 5745 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) & 5746 FDI_RX_ENABLE); 5747 5748 temp &= ~FDI_BC_BIFURCATION_SELECT; 5749 if (enable) 5750 temp |= FDI_BC_BIFURCATION_SELECT; 5751 5752 drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n", 5753 enable ? "en" : "dis"); 5754 intel_de_write(dev_priv, SOUTH_CHICKEN1, temp); 5755 intel_de_posting_read(dev_priv, SOUTH_CHICKEN1); 5756 } 5757 5758 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) 5759 { 5760 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5761 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5762 5763 switch (crtc->pipe) { 5764 case PIPE_A: 5765 break; 5766 case PIPE_B: 5767 if (crtc_state->fdi_lanes > 2) 5768 cpt_set_fdi_bc_bifurcation(dev_priv, false); 5769 else 5770 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5771 5772 break; 5773 case PIPE_C: 5774 cpt_set_fdi_bc_bifurcation(dev_priv, true); 5775 5776 break; 5777 default: 5778 BUG(); 5779 } 5780 } 5781 5782 /* 5783 * Finds the encoder associated with the given CRTC. This can only be 5784 * used when we know that the CRTC isn't feeding multiple encoders! 
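 * For example, ilk_pch_enable() below uses this to look up the single DP
 * encoder feeding the pipe when it programs the TRANS_DP_CTL port select.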
5785 */ 5786 static struct intel_encoder * 5787 intel_get_crtc_new_encoder(const struct intel_atomic_state *state, 5788 const struct intel_crtc_state *crtc_state) 5789 { 5790 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5791 const struct drm_connector_state *connector_state; 5792 const struct drm_connector *connector; 5793 struct intel_encoder *encoder = NULL; 5794 int num_encoders = 0; 5795 int i; 5796 5797 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 5798 if (connector_state->crtc != &crtc->base) 5799 continue; 5800 5801 encoder = to_intel_encoder(connector_state->best_encoder); 5802 num_encoders++; 5803 } 5804 5805 /* use the crtc's device here: encoder is NULL if no connector matched */ drm_WARN(crtc->base.dev, num_encoders != 1, 5806 "%d encoders for pipe %c\n", 5807 num_encoders, pipe_name(crtc->pipe)); 5808 5809 return encoder; 5810 } 5811 5812 /* 5813 * Enable PCH resources required for PCH ports: 5814 * - PCH PLLs 5815 * - FDI training & RX/TX 5816 * - update transcoder timings 5817 * - DP transcoding bits 5818 * - transcoder 5819 */ 5820 static void ilk_pch_enable(const struct intel_atomic_state *state, 5821 const struct intel_crtc_state *crtc_state) 5822 { 5823 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5824 struct drm_device *dev = crtc->base.dev; 5825 struct drm_i915_private *dev_priv = to_i915(dev); 5826 enum pipe pipe = crtc->pipe; 5827 u32 temp; 5828 5829 assert_pch_transcoder_disabled(dev_priv, pipe); 5830 5831 if (IS_IVYBRIDGE(dev_priv)) 5832 ivb_update_fdi_bc_bifurcation(crtc_state); 5833 5834 /* Write the TU size bits before fdi link training, so that error 5835 * detection works. */ 5836 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), 5837 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 5838 5839 /* For PCH output, train the FDI link */ 5840 dev_priv->display.fdi_link_train(crtc, crtc_state); 5841 5842 /* We need to program the right clock selection before writing the pixel 5843 * multiplier into the DPLL. */ 5844 if (HAS_PCH_CPT(dev_priv)) { 5845 u32 sel; 5846 5847 temp = intel_de_read(dev_priv, PCH_DPLL_SEL); 5848 temp |= TRANS_DPLL_ENABLE(pipe); 5849 sel = TRANS_DPLLB_SEL(pipe); 5850 if (crtc_state->shared_dpll == 5851 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 5852 temp |= sel; 5853 else 5854 temp &= ~sel; 5855 intel_de_write(dev_priv, PCH_DPLL_SEL, temp); 5856 } 5857 5858 /* XXX: pch pll's can be enabled any time before we enable the PCH 5859 * transcoder, and we actually should do this to not upset any PCH 5860 * transcoder that already uses the clock when we share it. 5861 * 5862 * Note that enable_shared_dpll tries to do the right thing, but 5863 * get_shared_dpll unconditionally resets the pll - we need that to have 5864 * the right LVDS enable sequence.
*/ 5865 intel_enable_shared_dpll(crtc_state); 5866 5867 /* set transcoder timing, panel must allow it */ 5868 assert_panel_unlocked(dev_priv, pipe); 5869 ilk_pch_transcoder_set_timings(crtc_state, pipe); 5870 5871 intel_fdi_normal_train(crtc); 5872 5873 /* For PCH DP, enable TRANS_DP_CTL */ 5874 if (HAS_PCH_CPT(dev_priv) && 5875 intel_crtc_has_dp_encoder(crtc_state)) { 5876 const struct drm_display_mode *adjusted_mode = 5877 &crtc_state->hw.adjusted_mode; 5878 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 5879 i915_reg_t reg = TRANS_DP_CTL(pipe); 5880 enum port port; 5881 5882 temp = intel_de_read(dev_priv, reg); 5883 temp &= ~(TRANS_DP_PORT_SEL_MASK | 5884 TRANS_DP_SYNC_MASK | 5885 TRANS_DP_BPC_MASK); 5886 temp |= TRANS_DP_OUTPUT_ENABLE; 5887 temp |= bpc << 9; /* same format but at 11:9 */ 5888 5889 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 5890 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 5891 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 5892 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 5893 5894 port = intel_get_crtc_new_encoder(state, crtc_state)->port; 5895 drm_WARN_ON(dev, port < PORT_B || port > PORT_D); 5896 temp |= TRANS_DP_PORT_SEL(port); 5897 5898 intel_de_write(dev_priv, reg, temp); 5899 } 5900 5901 ilk_enable_pch_transcoder(crtc_state); 5902 } 5903 5904 void lpt_pch_enable(const struct intel_crtc_state *crtc_state) 5905 { 5906 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5907 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5908 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 5909 5910 assert_pch_transcoder_disabled(dev_priv, PIPE_A); 5911 5912 lpt_program_iclkip(crtc_state); 5913 5914 /* Set transcoder timing. */ 5915 ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); 5916 5917 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 5918 } 5919 5920 static void cpt_verify_modeset(struct drm_i915_private *dev_priv, 5921 enum pipe pipe) 5922 { 5923 i915_reg_t dslreg = PIPEDSL(pipe); 5924 u32 temp; 5925 5926 temp = intel_de_read(dev_priv, dslreg); 5927 udelay(500); 5928 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) { 5929 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) 5930 drm_err(&dev_priv->drm, 5931 "mode set failed: pipe %c stuck\n", 5932 pipe_name(pipe)); 5933 } 5934 } 5935 5936 /* 5937 * The hardware phase 0.0 refers to the center of the pixel. 5938 * We want to start from the top/left edge which is phase 5939 * -0.5. That matches how the hardware calculates the scaling 5940 * factors (from top-left of the first pixel to bottom-right 5941 * of the last pixel, as opposed to the pixel centers). 5942 * 5943 * For 4:2:0 subsampled chroma planes we obviously have to 5944 * adjust that so that the chroma sample position lands in 5945 * the right spot. 5946 * 5947 * Note that for packed YCbCr 4:2:2 formats there is no way to 5948 * control chroma siting. The hardware simply replicates the 5949 * chroma samples for both of the luma samples, and thus we don't 5950 * actually get the expected MPEG2 chroma siting convention :( 5951 * The same behaviour is observed on pre-SKL platforms as well. 
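 *
 * As a worked example of the formula implemented below in
 * skl_scaler_calc_phase(): upscaling 1:4 with the .16 fixed point
 * scale factor means scale = 0x4000, so for an RGB plane (sub = 1,
 * no co-siting) phase = -0x8000 + 0x4000 / 2 = -0x6000, i.e. the
 * -0.375 initial phase shown in the upscaling diagram further down.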
5952 * 5953 * Theory behind the formula (note that we ignore sub-pixel 5954 * source coordinates): 5955 * s = source sample position 5956 * d = destination sample position 5957 * 5958 * Downscaling 4:1: 5959 * -0.5 5960 * | 0.0 5961 * | | 1.5 (initial phase) 5962 * | | | 5963 * v v v 5964 * | s | s | s | s | 5965 * | d | 5966 * 5967 * Upscaling 1:4: 5968 * -0.5 5969 * | -0.375 (initial phase) 5970 * | | 0.0 5971 * | | | 5972 * v v v 5973 * | s | 5974 * | d | d | d | d | 5975 */ 5976 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited) 5977 { 5978 int phase = -0x8000; 5979 u16 trip = 0; 5980 5981 if (chroma_cosited) 5982 phase += (sub - 1) * 0x8000 / sub; 5983 5984 phase += scale / (2 * sub); 5985 5986 /* 5987 * Hardware initial phase limited to [-0.5:1.5]. 5988 * Since the max hardware scale factor is 3.0, we 5989 * should never actually exceed 1.0 here. 5990 */ 5991 WARN_ON(phase < -0x8000 || phase > 0x18000); 5992 5993 if (phase < 0) 5994 phase = 0x10000 + phase; 5995 else 5996 trip = PS_PHASE_TRIP; 5997 5998 return ((phase >> 2) & PS_PHASE_MASK) | trip; 5999 } 6000 6001 #define SKL_MIN_SRC_W 8 6002 #define SKL_MAX_SRC_W 4096 6003 #define SKL_MIN_SRC_H 8 6004 #define SKL_MAX_SRC_H 4096 6005 #define SKL_MIN_DST_W 8 6006 #define SKL_MAX_DST_W 4096 6007 #define SKL_MIN_DST_H 8 6008 #define SKL_MAX_DST_H 4096 6009 #define ICL_MAX_SRC_W 5120 6010 #define ICL_MAX_SRC_H 4096 6011 #define ICL_MAX_DST_W 5120 6012 #define ICL_MAX_DST_H 4096 6013 #define SKL_MIN_YUV_420_SRC_W 16 6014 #define SKL_MIN_YUV_420_SRC_H 16 6015 6016 static int 6017 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 6018 unsigned int scaler_user, int *scaler_id, 6019 int src_w, int src_h, int dst_w, int dst_h, 6020 const struct drm_format_info *format, 6021 u64 modifier, bool need_scaler) 6022 { 6023 struct intel_crtc_scaler_state *scaler_state = 6024 &crtc_state->scaler_state; 6025 struct intel_crtc *intel_crtc = 6026 to_intel_crtc(crtc_state->uapi.crtc); 6027 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 6028 const struct drm_display_mode *adjusted_mode = 6029 &crtc_state->hw.adjusted_mode; 6030 6031 /* 6032 * Src coordinates are already rotated by 270 degrees for 6033 * the 90/270 degree plane rotation cases (to match the 6034 * GTT mapping), hence no need to account for rotation here. 6035 */ 6036 if (src_w != dst_w || src_h != dst_h) 6037 need_scaler = true; 6038 6039 /* 6040 * Scaling/fitting not supported in IF-ID mode in GEN9+ 6041 * TODO: Interlace fetch mode doesn't support YUV420 planar formats. 6042 * Once NV12 is enabled, handle it here while allocating scaler 6043 * for NV12. 6044 */ 6045 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable && 6046 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 6047 drm_dbg_kms(&dev_priv->drm, 6048 "Pipe/Plane scaling not supported with IF-ID mode\n"); 6049 return -EINVAL; 6050 } 6051 6052 /* 6053 * If the plane is being disabled, the scaler is no longer required, or we 6054 * are force detaching: 6055 * - free the scaler bound to this plane/crtc 6056 * - in order to do this, update crtc_state->scaler_state.scaler_users 6057 * 6058 * Here the scaler state in crtc_state is set free so that the 6059 * scaler can be assigned to another user. The actual register 6060 * update to free the scaler is done in plane/panel-fit programming. 6061 * For this purpose crtc/plane_state->scaler_id isn't reset here.
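 * (The register write that actually turns the scaler off happens
 * later via skl_detach_scaler(), e.g. from skl_scaler_disable().)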
6061 */ 6062 if (force_detach || !need_scaler) { 6063 if (*scaler_id >= 0) { 6064 scaler_state->scaler_users &= ~(1 << scaler_user); 6065 scaler_state->scalers[*scaler_id].in_use = 0; 6066 6067 drm_dbg_kms(&dev_priv->drm, 6068 "scaler_user index %u.%u: " 6069 "Staged freeing scaler id %d scaler_users = 0x%x\n", 6070 intel_crtc->pipe, scaler_user, *scaler_id, 6071 scaler_state->scaler_users); 6072 *scaler_id = -1; 6073 } 6074 return 0; 6075 } 6076 6077 if (format && intel_format_info_is_yuv_semiplanar(format, modifier) && 6078 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { 6079 drm_dbg_kms(&dev_priv->drm, 6080 "Planar YUV: src dimensions not met\n"); 6081 return -EINVAL; 6082 } 6083 6084 /* range checks */ 6085 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 6086 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 6087 (INTEL_GEN(dev_priv) >= 11 && 6088 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || 6089 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || 6090 (INTEL_GEN(dev_priv) < 11 && 6091 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 6092 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { 6093 drm_dbg_kms(&dev_priv->drm, 6094 "scaler_user index %u.%u: src %ux%u dst %ux%u " 6095 "size is out of scaler range\n", 6096 intel_crtc->pipe, scaler_user, src_w, src_h, 6097 dst_w, dst_h); 6098 return -EINVAL; 6099 } 6100 6101 /* mark this plane as a scaler user in crtc_state */ 6102 scaler_state->scaler_users |= (1 << scaler_user); 6103 drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: " 6104 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", 6105 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, 6106 scaler_state->scaler_users); 6107 6108 return 0; 6109 } 6110 6111 static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state) 6112 { 6113 const struct drm_display_mode *adjusted_mode = 6114 &crtc_state->hw.adjusted_mode; 6115 int width, height; 6116 6117 if (crtc_state->pch_pfit.enabled) { 6118 width = drm_rect_width(&crtc_state->pch_pfit.dst); 6119 height = drm_rect_height(&crtc_state->pch_pfit.dst); 6120 } else { 6121 width = adjusted_mode->crtc_hdisplay; 6122 height = adjusted_mode->crtc_vdisplay; 6123 } 6124 6125 return skl_update_scaler(crtc_state, !crtc_state->hw.active, 6126 SKL_CRTC_INDEX, 6127 &crtc_state->scaler_state.scaler_id, 6128 crtc_state->pipe_src_w, crtc_state->pipe_src_h, 6129 width, height, NULL, 0, 6130 crtc_state->pch_pfit.enabled); 6131 } 6132 6133 /** 6134 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 6135 * @crtc_state: crtc's scaler state 6136 * @plane_state: atomic plane state to update 6137 * 6138 * Return 6139 * 0 - scaler_usage updated successfully 6140 * error - requested scaling cannot be supported or other error condition 6141 */ 6142 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 6143 struct intel_plane_state *plane_state) 6144 { 6145 struct intel_plane *intel_plane = 6146 to_intel_plane(plane_state->uapi.plane); 6147 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 6148 struct drm_framebuffer *fb = plane_state->hw.fb; 6149 int ret; 6150 bool force_detach = !fb || !plane_state->uapi.visible; 6151 bool need_scaler = false; 6152 6153 /* Pre-gen11 and SDR planes always need a scaler for planar formats. 
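 * (On gen11+ only HDR planes can fetch the chroma plane themselves,
 * hence the icl_is_hdr_plane() check below.)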
*/ 6154 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && 6155 fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) 6156 need_scaler = true; 6157 6158 ret = skl_update_scaler(crtc_state, force_detach, 6159 drm_plane_index(&intel_plane->base), 6160 &plane_state->scaler_id, 6161 drm_rect_width(&plane_state->uapi.src) >> 16, 6162 drm_rect_height(&plane_state->uapi.src) >> 16, 6163 drm_rect_width(&plane_state->uapi.dst), 6164 drm_rect_height(&plane_state->uapi.dst), 6165 fb ? fb->format : NULL, 6166 fb ? fb->modifier : 0, 6167 need_scaler); 6168 6169 if (ret || plane_state->scaler_id < 0) 6170 return ret; 6171 6172 /* check colorkey */ 6173 if (plane_state->ckey.flags) { 6174 drm_dbg_kms(&dev_priv->drm, 6175 "[PLANE:%d:%s] scaling with color key not allowed", 6176 intel_plane->base.base.id, 6177 intel_plane->base.name); 6178 return -EINVAL; 6179 } 6180 6181 /* Check src format */ 6182 switch (fb->format->format) { 6183 case DRM_FORMAT_RGB565: 6184 case DRM_FORMAT_XBGR8888: 6185 case DRM_FORMAT_XRGB8888: 6186 case DRM_FORMAT_ABGR8888: 6187 case DRM_FORMAT_ARGB8888: 6188 case DRM_FORMAT_XRGB2101010: 6189 case DRM_FORMAT_XBGR2101010: 6190 case DRM_FORMAT_ARGB2101010: 6191 case DRM_FORMAT_ABGR2101010: 6192 case DRM_FORMAT_YUYV: 6193 case DRM_FORMAT_YVYU: 6194 case DRM_FORMAT_UYVY: 6195 case DRM_FORMAT_VYUY: 6196 case DRM_FORMAT_NV12: 6197 case DRM_FORMAT_XYUV8888: 6198 case DRM_FORMAT_P010: 6199 case DRM_FORMAT_P012: 6200 case DRM_FORMAT_P016: 6201 case DRM_FORMAT_Y210: 6202 case DRM_FORMAT_Y212: 6203 case DRM_FORMAT_Y216: 6204 case DRM_FORMAT_XVYU2101010: 6205 case DRM_FORMAT_XVYU12_16161616: 6206 case DRM_FORMAT_XVYU16161616: 6207 break; 6208 case DRM_FORMAT_XBGR16161616F: 6209 case DRM_FORMAT_ABGR16161616F: 6210 case DRM_FORMAT_XRGB16161616F: 6211 case DRM_FORMAT_ARGB16161616F: 6212 if (INTEL_GEN(dev_priv) >= 11) 6213 break; 6214 fallthrough; 6215 default: 6216 drm_dbg_kms(&dev_priv->drm, 6217 "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", 6218 intel_plane->base.base.id, intel_plane->base.name, 6219 fb->base.id, fb->format->format); 6220 return -EINVAL; 6221 } 6222 6223 return 0; 6224 } 6225 6226 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state) 6227 { 6228 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 6229 int i; 6230 6231 for (i = 0; i < crtc->num_scalers; i++) 6232 skl_detach_scaler(crtc, i); 6233 } 6234 6235 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) 6236 { 6237 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6238 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6239 const struct intel_crtc_scaler_state *scaler_state = 6240 &crtc_state->scaler_state; 6241 struct drm_rect src = { 6242 .x2 = crtc_state->pipe_src_w << 16, 6243 .y2 = crtc_state->pipe_src_h << 16, 6244 }; 6245 const struct drm_rect *dst = &crtc_state->pch_pfit.dst; 6246 u16 uv_rgb_hphase, uv_rgb_vphase; 6247 enum pipe pipe = crtc->pipe; 6248 int width = drm_rect_width(dst); 6249 int height = drm_rect_height(dst); 6250 int x = dst->x1; 6251 int y = dst->y1; 6252 int hscale, vscale; 6253 unsigned long irqflags; 6254 int id; 6255 6256 if (!crtc_state->pch_pfit.enabled) 6257 return; 6258 6259 if (drm_WARN_ON(&dev_priv->drm, 6260 crtc_state->scaler_state.scaler_id < 0)) 6261 return; 6262 6263 hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX); 6264 vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX); 6265 6266 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); 6267 uv_rgb_vphase = 
skl_scaler_calc_phase(1, vscale, false); 6268 6269 id = scaler_state->scaler_id; 6270 6271 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 6272 6273 intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 6274 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 6275 intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id), 6276 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); 6277 intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id), 6278 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); 6279 intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), 6280 x << 16 | y); 6281 intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), 6282 width << 16 | height); 6283 6284 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 6285 } 6286 6287 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) 6288 { 6289 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6290 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6291 const struct drm_rect *dst = &crtc_state->pch_pfit.dst; 6292 enum pipe pipe = crtc->pipe; 6293 int width = drm_rect_width(dst); 6294 int height = drm_rect_height(dst); 6295 int x = dst->x1; 6296 int y = dst->y1; 6297 6298 if (!crtc_state->pch_pfit.enabled) 6299 return; 6300 6301 /* Force use of hard-coded filter coefficients 6302 * as some pre-programmed values are broken, 6303 * e.g. x201. 6304 */ 6305 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 6306 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE | 6307 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe)); 6308 else 6309 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE | 6310 PF_FILTER_MED_3x3); 6311 intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y); 6312 intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height); 6313 } 6314 6315 void hsw_enable_ips(const struct intel_crtc_state *crtc_state) 6316 { 6317 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6318 struct drm_device *dev = crtc->base.dev; 6319 struct drm_i915_private *dev_priv = to_i915(dev); 6320 6321 if (!crtc_state->ips_enabled) 6322 return; 6323 6324 /* 6325 * We can only enable IPS after we enable a plane and wait for a vblank. 6326 * This function is called from post_plane_update, which is run after 6327 * a vblank wait. 6328 */ 6329 drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); 6330 6331 if (IS_BROADWELL(dev_priv)) { 6332 drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 6333 IPS_ENABLE | IPS_PCODE_CONTROL)); 6334 /* Quoting Art Runyan: "it's not safe to expect any particular 6335 * value in IPS_CTL bit 31 after enabling IPS through the 6336 * mailbox." Moreover, the mailbox may return a bogus state, 6337 * so we need to just enable it and continue on. 6338 */ 6339 } else { 6340 intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE); 6341 /* The bit only becomes 1 in the next vblank, so this wait here 6342 * is essentially intel_wait_for_vblank. If we don't have this 6343 * and don't wait for vblanks until the end of crtc_enable, then 6344 * the HW state readout code will complain that the expected 6345 * IPS_CTL value is not the one we read.
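 * (The 50 ms timeout below is roughly three frames at 60 Hz, giving
 * plenty of headroom over the single vblank the bit actually needs.)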
*/ 6346 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50)) 6347 drm_err(&dev_priv->drm, 6348 "Timed out waiting for IPS enable\n"); 6349 } 6350 } 6351 6352 void hsw_disable_ips(const struct intel_crtc_state *crtc_state) 6353 { 6354 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6355 struct drm_device *dev = crtc->base.dev; 6356 struct drm_i915_private *dev_priv = to_i915(dev); 6357 6358 if (!crtc_state->ips_enabled) 6359 return; 6360 6361 if (IS_BROADWELL(dev_priv)) { 6362 drm_WARN_ON(dev, 6363 sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 6364 /* 6365 * Wait for PCODE to finish disabling IPS. The BSpec specified 6366 * 42ms timeout value leads to occasional timeouts so use 100ms 6367 * instead. 6368 */ 6369 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100)) 6370 drm_err(&dev_priv->drm, 6371 "Timed out waiting for IPS disable\n"); 6372 } else { 6373 intel_de_write(dev_priv, IPS_CTL, 0); 6374 intel_de_posting_read(dev_priv, IPS_CTL); 6375 } 6376 6377 /* We need to wait for a vblank before we can disable the plane. */ 6378 intel_wait_for_vblank(dev_priv, crtc->pipe); 6379 } 6380 6381 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 6382 { 6383 if (intel_crtc->overlay) 6384 (void) intel_overlay_switch_off(intel_crtc->overlay); 6385 6386 /* Let userspace switch the overlay on again. In most cases userspace 6387 * has to recompute where to put it anyway. 6388 */ 6389 } 6390 6391 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state, 6392 const struct intel_crtc_state *new_crtc_state) 6393 { 6394 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6395 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6396 6397 if (!old_crtc_state->ips_enabled) 6398 return false; 6399 6400 if (needs_modeset(new_crtc_state)) 6401 return true; 6402 6403 /* 6404 * Workaround : Do not read or write the pipe palette/gamma data while 6405 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 6406 * 6407 * Disable IPS before we program the LUT. 6408 */ 6409 if (IS_HASWELL(dev_priv) && 6410 (new_crtc_state->uapi.color_mgmt_changed || 6411 new_crtc_state->update_pipe) && 6412 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 6413 return true; 6414 6415 return !new_crtc_state->ips_enabled; 6416 } 6417 6418 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state, 6419 const struct intel_crtc_state *new_crtc_state) 6420 { 6421 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6422 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6423 6424 if (!new_crtc_state->ips_enabled) 6425 return false; 6426 6427 if (needs_modeset(new_crtc_state)) 6428 return true; 6429 6430 /* 6431 * Workaround : Do not read or write the pipe palette/gamma data while 6432 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 6433 * 6434 * Re-enable IPS after the LUT has been programmed. 6435 */ 6436 if (IS_HASWELL(dev_priv) && 6437 (new_crtc_state->uapi.color_mgmt_changed || 6438 new_crtc_state->update_pipe) && 6439 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 6440 return true; 6441 6442 /* 6443 * We can't read out IPS on broadwell, assume the worst and 6444 * forcibly enable IPS on the first fastset. 
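 * ('inherited' marks a state taken over from the BIOS at boot, so in
 * practice this only triggers on the first commit after takeover.)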
6445 */ 6446 if (new_crtc_state->update_pipe && old_crtc_state->inherited) 6447 return true; 6448 6449 return !old_crtc_state->ips_enabled; 6450 } 6451 6452 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state) 6453 { 6454 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 6455 6456 if (!crtc_state->nv12_planes) 6457 return false; 6458 6459 /* WA Display #0827: Gen9:all */ 6460 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) 6461 return true; 6462 6463 return false; 6464 } 6465 6466 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) 6467 { 6468 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 6469 6470 /* Wa_2006604312:icl,ehl */ 6471 if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11)) 6472 return true; 6473 6474 return false; 6475 } 6476 6477 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, 6478 const struct intel_crtc_state *new_crtc_state) 6479 { 6480 return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) && 6481 new_crtc_state->active_planes; 6482 } 6483 6484 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, 6485 const struct intel_crtc_state *new_crtc_state) 6486 { 6487 return old_crtc_state->active_planes && 6488 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state)); 6489 } 6490 6491 static void intel_post_plane_update(struct intel_atomic_state *state, 6492 struct intel_crtc *crtc) 6493 { 6494 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6495 const struct intel_crtc_state *old_crtc_state = 6496 intel_atomic_get_old_crtc_state(state, crtc); 6497 const struct intel_crtc_state *new_crtc_state = 6498 intel_atomic_get_new_crtc_state(state, crtc); 6499 enum pipe pipe = crtc->pipe; 6500 6501 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits); 6502 6503 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active) 6504 intel_update_watermarks(crtc); 6505 6506 if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state)) 6507 hsw_enable_ips(new_crtc_state); 6508 6509 intel_fbc_post_update(state, crtc); 6510 6511 if (needs_nv12_wa(old_crtc_state) && 6512 !needs_nv12_wa(new_crtc_state)) 6513 skl_wa_827(dev_priv, pipe, false); 6514 6515 if (needs_scalerclk_wa(old_crtc_state) && 6516 !needs_scalerclk_wa(new_crtc_state)) 6517 icl_wa_scalerclkgating(dev_priv, pipe, false); 6518 } 6519 6520 static void intel_pre_plane_update(struct intel_atomic_state *state, 6521 struct intel_crtc *crtc) 6522 { 6523 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6524 const struct intel_crtc_state *old_crtc_state = 6525 intel_atomic_get_old_crtc_state(state, crtc); 6526 const struct intel_crtc_state *new_crtc_state = 6527 intel_atomic_get_new_crtc_state(state, crtc); 6528 enum pipe pipe = crtc->pipe; 6529 6530 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state)) 6531 hsw_disable_ips(old_crtc_state); 6532 6533 if (intel_fbc_pre_update(state, crtc)) 6534 intel_wait_for_vblank(dev_priv, pipe); 6535 6536 /* Display WA 827 */ 6537 if (!needs_nv12_wa(old_crtc_state) && 6538 needs_nv12_wa(new_crtc_state)) 6539 skl_wa_827(dev_priv, pipe, true); 6540 6541 /* Wa_2006604312:icl,ehl */ 6542 if (!needs_scalerclk_wa(old_crtc_state) && 6543 needs_scalerclk_wa(new_crtc_state)) 6544 icl_wa_scalerclkgating(dev_priv, pipe, true); 6545 6546 /* 6547 * Vblank time updates from the shadow to live plane control register 6548 * are blocked if the memory self-refresh mode is active at that 6549 * 
moment. So to make sure the plane gets truly disabled, we disable 6550 * the self-refresh mode first. The self-refresh enable bit in turn 6551 * will only be checked/applied by the HW at the next frame start 6552 * event, which is after the vblank start event, so we need to have a 6553 * wait-for-vblank between disabling the plane and the pipe. 6554 */ 6555 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active && 6556 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 6557 intel_wait_for_vblank(dev_priv, pipe); 6558 6559 /* 6560 * IVB workaround: must disable low power watermarks for at least 6561 * one frame before enabling scaling. LP watermarks can be re-enabled 6562 * when scaling is disabled. 6563 * 6564 * WaCxSRDisabledForSpriteScaling:ivb 6565 */ 6566 if (old_crtc_state->hw.active && 6567 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) 6568 intel_wait_for_vblank(dev_priv, pipe); 6569 6570 /* 6571 * If we're doing a modeset we don't need to do any 6572 * pre-vblank watermark programming here. 6573 */ 6574 if (!needs_modeset(new_crtc_state)) { 6575 /* 6576 * For platforms that support atomic watermarks, program the 6577 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 6578 * will be the intermediate values that are safe for both pre- and 6579 * post-vblank; when vblank happens, the 'active' values will be set 6580 * to the final 'target' values and we'll do this again to get the 6581 * optimal watermarks. For gen9+ platforms, the values we program here 6582 * will be the final target values which will get automatically latched 6583 * at vblank time; no further programming will be necessary. 6584 * 6585 * If a platform hasn't been transitioned to atomic watermarks yet, 6586 * we'll continue to update watermarks the old way, if flags tell 6587 * us to. 6588 */ 6589 if (dev_priv->display.initial_watermarks) 6590 dev_priv->display.initial_watermarks(state, crtc); 6591 else if (new_crtc_state->update_wm_pre) 6592 intel_update_watermarks(crtc); 6593 } 6594 6595 /* 6596 * Gen2 reports pipe underruns whenever all planes are disabled. 6597 * So disable underrun reporting before all the planes get disabled. 6598 * 6599 * We do this after .initial_watermarks() so that we have a 6600 * chance of catching underruns with the intermediate watermarks 6601 * vs. the old plane configuration.
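 * (On ILK-style platforms the 'intermediate' watermarks are computed
 * as a maximum of the old and new values, which is what makes them
 * safe on both sides of the vblank.)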
6602 */ 6603 if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state)) 6604 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6605 } 6606 6607 static void intel_crtc_disable_planes(struct intel_atomic_state *state, 6608 struct intel_crtc *crtc) 6609 { 6610 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6611 const struct intel_crtc_state *new_crtc_state = 6612 intel_atomic_get_new_crtc_state(state, crtc); 6613 unsigned int update_mask = new_crtc_state->update_planes; 6614 const struct intel_plane_state *old_plane_state; 6615 struct intel_plane *plane; 6616 unsigned fb_bits = 0; 6617 int i; 6618 6619 intel_crtc_dpms_overlay_disable(crtc); 6620 6621 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 6622 if (crtc->pipe != plane->pipe || 6623 !(update_mask & BIT(plane->id))) 6624 continue; 6625 6626 intel_disable_plane(plane, new_crtc_state); 6627 6628 if (old_plane_state->uapi.visible) 6629 fb_bits |= plane->frontbuffer_bit; 6630 } 6631 6632 intel_frontbuffer_flip(dev_priv, fb_bits); 6633 } 6634 6635 /* 6636 * intel_connector_primary_encoder - get the primary encoder for a connector 6637 * @connector: connector for which to return the encoder 6638 * 6639 * Returns the primary encoder for a connector. There is a 1:1 mapping from 6640 * all connectors to their encoder, except for DP-MST connectors which have 6641 * both a virtual and a primary encoder. These DP-MST primary encoders can be 6642 * pointed to by as many DP-MST connectors as there are pipes. 6643 */ 6644 static struct intel_encoder * 6645 intel_connector_primary_encoder(struct intel_connector *connector) 6646 { 6647 struct intel_encoder *encoder; 6648 6649 if (connector->mst_port) 6650 return &dp_to_dig_port(connector->mst_port)->base; 6651 6652 encoder = intel_attached_encoder(connector); 6653 drm_WARN_ON(connector->base.dev, !encoder); 6654 6655 return encoder; 6656 } 6657 6658 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 6659 { 6660 struct drm_connector_state *new_conn_state; 6661 struct drm_connector *connector; 6662 int i; 6663 6664 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 6665 i) { 6666 struct intel_connector *intel_connector; 6667 struct intel_encoder *encoder; 6668 struct intel_crtc *crtc; 6669 6670 if (!intel_connector_needs_modeset(state, connector)) 6671 continue; 6672 6673 intel_connector = to_intel_connector(connector); 6674 encoder = intel_connector_primary_encoder(intel_connector); 6675 if (!encoder->update_prepare) 6676 continue; 6677 6678 crtc = new_conn_state->crtc ? 6679 to_intel_crtc(new_conn_state->crtc) : NULL; 6680 encoder->update_prepare(state, encoder, crtc); 6681 } 6682 } 6683 6684 static void intel_encoders_update_complete(struct intel_atomic_state *state) 6685 { 6686 struct drm_connector_state *new_conn_state; 6687 struct drm_connector *connector; 6688 int i; 6689 6690 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 6691 i) { 6692 struct intel_connector *intel_connector; 6693 struct intel_encoder *encoder; 6694 struct intel_crtc *crtc; 6695 6696 if (!intel_connector_needs_modeset(state, connector)) 6697 continue; 6698 6699 intel_connector = to_intel_connector(connector); 6700 encoder = intel_connector_primary_encoder(intel_connector); 6701 if (!encoder->update_complete) 6702 continue; 6703 6704 crtc = new_conn_state->crtc ? 
6705 to_intel_crtc(new_conn_state->crtc) : NULL; 6706 encoder->update_complete(state, encoder, crtc); 6707 } 6708 } 6709 6710 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, 6711 struct intel_crtc *crtc) 6712 { 6713 const struct intel_crtc_state *crtc_state = 6714 intel_atomic_get_new_crtc_state(state, crtc); 6715 const struct drm_connector_state *conn_state; 6716 struct drm_connector *conn; 6717 int i; 6718 6719 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6720 struct intel_encoder *encoder = 6721 to_intel_encoder(conn_state->best_encoder); 6722 6723 if (conn_state->crtc != &crtc->base) 6724 continue; 6725 6726 if (encoder->pre_pll_enable) 6727 encoder->pre_pll_enable(state, encoder, 6728 crtc_state, conn_state); 6729 } 6730 } 6731 6732 static void intel_encoders_pre_enable(struct intel_atomic_state *state, 6733 struct intel_crtc *crtc) 6734 { 6735 const struct intel_crtc_state *crtc_state = 6736 intel_atomic_get_new_crtc_state(state, crtc); 6737 const struct drm_connector_state *conn_state; 6738 struct drm_connector *conn; 6739 int i; 6740 6741 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6742 struct intel_encoder *encoder = 6743 to_intel_encoder(conn_state->best_encoder); 6744 6745 if (conn_state->crtc != &crtc->base) 6746 continue; 6747 6748 if (encoder->pre_enable) 6749 encoder->pre_enable(state, encoder, 6750 crtc_state, conn_state); 6751 } 6752 } 6753 6754 static void intel_encoders_enable(struct intel_atomic_state *state, 6755 struct intel_crtc *crtc) 6756 { 6757 const struct intel_crtc_state *crtc_state = 6758 intel_atomic_get_new_crtc_state(state, crtc); 6759 const struct drm_connector_state *conn_state; 6760 struct drm_connector *conn; 6761 int i; 6762 6763 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6764 struct intel_encoder *encoder = 6765 to_intel_encoder(conn_state->best_encoder); 6766 6767 if (conn_state->crtc != &crtc->base) 6768 continue; 6769 6770 if (encoder->enable) 6771 encoder->enable(state, encoder, 6772 crtc_state, conn_state); 6773 intel_opregion_notify_encoder(encoder, true); 6774 } 6775 } 6776 6777 static void intel_encoders_disable(struct intel_atomic_state *state, 6778 struct intel_crtc *crtc) 6779 { 6780 const struct intel_crtc_state *old_crtc_state = 6781 intel_atomic_get_old_crtc_state(state, crtc); 6782 const struct drm_connector_state *old_conn_state; 6783 struct drm_connector *conn; 6784 int i; 6785 6786 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6787 struct intel_encoder *encoder = 6788 to_intel_encoder(old_conn_state->best_encoder); 6789 6790 if (old_conn_state->crtc != &crtc->base) 6791 continue; 6792 6793 intel_opregion_notify_encoder(encoder, false); 6794 if (encoder->disable) 6795 encoder->disable(state, encoder, 6796 old_crtc_state, old_conn_state); 6797 } 6798 } 6799 6800 static void intel_encoders_post_disable(struct intel_atomic_state *state, 6801 struct intel_crtc *crtc) 6802 { 6803 const struct intel_crtc_state *old_crtc_state = 6804 intel_atomic_get_old_crtc_state(state, crtc); 6805 const struct drm_connector_state *old_conn_state; 6806 struct drm_connector *conn; 6807 int i; 6808 6809 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6810 struct intel_encoder *encoder = 6811 to_intel_encoder(old_conn_state->best_encoder); 6812 6813 if (old_conn_state->crtc != &crtc->base) 6814 continue; 6815 6816 if (encoder->post_disable) 6817 encoder->post_disable(state, encoder, 6818 old_crtc_state, 
old_conn_state); 6819 } 6820 } 6821 6822 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state, 6823 struct intel_crtc *crtc) 6824 { 6825 const struct intel_crtc_state *old_crtc_state = 6826 intel_atomic_get_old_crtc_state(state, crtc); 6827 const struct drm_connector_state *old_conn_state; 6828 struct drm_connector *conn; 6829 int i; 6830 6831 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6832 struct intel_encoder *encoder = 6833 to_intel_encoder(old_conn_state->best_encoder); 6834 6835 if (old_conn_state->crtc != &crtc->base) 6836 continue; 6837 6838 if (encoder->post_pll_disable) 6839 encoder->post_pll_disable(state, encoder, 6840 old_crtc_state, old_conn_state); 6841 } 6842 } 6843 6844 static void intel_encoders_update_pipe(struct intel_atomic_state *state, 6845 struct intel_crtc *crtc) 6846 { 6847 const struct intel_crtc_state *crtc_state = 6848 intel_atomic_get_new_crtc_state(state, crtc); 6849 const struct drm_connector_state *conn_state; 6850 struct drm_connector *conn; 6851 int i; 6852 6853 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6854 struct intel_encoder *encoder = 6855 to_intel_encoder(conn_state->best_encoder); 6856 6857 if (conn_state->crtc != &crtc->base) 6858 continue; 6859 6860 if (encoder->update_pipe) 6861 encoder->update_pipe(state, encoder, 6862 crtc_state, conn_state); 6863 } 6864 } 6865 6866 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) 6867 { 6868 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6869 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 6870 6871 plane->disable_plane(plane, crtc_state); 6872 } 6873 6874 static void ilk_crtc_enable(struct intel_atomic_state *state, 6875 struct intel_crtc *crtc) 6876 { 6877 const struct intel_crtc_state *new_crtc_state = 6878 intel_atomic_get_new_crtc_state(state, crtc); 6879 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6880 enum pipe pipe = crtc->pipe; 6881 6882 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 6883 return; 6884 6885 /* 6886 * Sometimes spurious CPU pipe underruns happen during FDI 6887 * training, at least with VGA+HDMI cloning. Suppress them. 6888 * 6889 * On ILK we get an occasional spurious CPU pipe underruns 6890 * between eDP port A enable and vdd enable. Also PCH port 6891 * enable seems to result in the occasional CPU pipe underrun. 6892 * 6893 * Spurious PCH underruns also occur during PCH enabling. 6894 */ 6895 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6896 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6897 6898 if (new_crtc_state->has_pch_encoder) 6899 intel_prepare_shared_dpll(new_crtc_state); 6900 6901 if (intel_crtc_has_dp_encoder(new_crtc_state)) 6902 intel_dp_set_m_n(new_crtc_state, M1_N1); 6903 6904 intel_set_pipe_timings(new_crtc_state); 6905 intel_set_pipe_src_size(new_crtc_state); 6906 6907 if (new_crtc_state->has_pch_encoder) 6908 intel_cpu_transcoder_set_m_n(new_crtc_state, 6909 &new_crtc_state->fdi_m_n, NULL); 6910 6911 ilk_set_pipeconf(new_crtc_state); 6912 6913 crtc->active = true; 6914 6915 intel_encoders_pre_enable(state, crtc); 6916 6917 if (new_crtc_state->has_pch_encoder) { 6918 /* Note: FDI PLL enabling _must_ be done before we enable the 6919 * cpu pipes, hence this is separate from all the other fdi/pch 6920 * enabling. 
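 * (ilk_fdi_pll_enable() also switches the FDI RX unit from Rawclk to
 * PCDclk; see the warmup delays in that helper above.)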
*/ 6921 ilk_fdi_pll_enable(new_crtc_state); 6922 } else { 6923 assert_fdi_tx_disabled(dev_priv, pipe); 6924 assert_fdi_rx_disabled(dev_priv, pipe); 6925 } 6926 6927 ilk_pfit_enable(new_crtc_state); 6928 6929 /* 6930 * On ILK+ LUT must be loaded before the pipe is running but with 6931 * clocks enabled 6932 */ 6933 intel_color_load_luts(new_crtc_state); 6934 intel_color_commit(new_crtc_state); 6935 /* update DSPCNTR to configure gamma for pipe bottom color */ 6936 intel_disable_primary_plane(new_crtc_state); 6937 6938 if (dev_priv->display.initial_watermarks) 6939 dev_priv->display.initial_watermarks(state, crtc); 6940 intel_enable_pipe(new_crtc_state); 6941 6942 if (new_crtc_state->has_pch_encoder) 6943 ilk_pch_enable(state, new_crtc_state); 6944 6945 intel_crtc_vblank_on(new_crtc_state); 6946 6947 intel_encoders_enable(state, crtc); 6948 6949 if (HAS_PCH_CPT(dev_priv)) 6950 cpt_verify_modeset(dev_priv, pipe); 6951 6952 /* 6953 * Must wait for vblank to avoid spurious PCH FIFO underruns. 6954 * And a second vblank wait is needed at least on ILK with 6955 * some interlaced HDMI modes. Let's do the double wait always 6956 * in case there are more corner cases we don't know about. 6957 */ 6958 if (new_crtc_state->has_pch_encoder) { 6959 intel_wait_for_vblank(dev_priv, pipe); 6960 intel_wait_for_vblank(dev_priv, pipe); 6961 } 6962 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6963 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6964 } 6965 6966 /* IPS only exists on ULT machines and is tied to pipe A. */ 6967 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 6968 { 6969 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; 6970 } 6971 6972 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, 6973 enum pipe pipe, bool apply) 6974 { 6975 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)); 6976 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 6977 6978 if (apply) 6979 val |= mask; 6980 else 6981 val &= ~mask; 6982 6983 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val); 6984 } 6985 6986 static void icl_pipe_mbus_enable(struct intel_crtc *crtc) 6987 { 6988 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6989 enum pipe pipe = crtc->pipe; 6990 u32 val; 6991 6992 val = MBUS_DBOX_A_CREDIT(2); 6993 6994 if (INTEL_GEN(dev_priv) >= 12) { 6995 val |= MBUS_DBOX_BW_CREDIT(2); 6996 val |= MBUS_DBOX_B_CREDIT(12); 6997 } else { 6998 val |= MBUS_DBOX_BW_CREDIT(1); 6999 val |= MBUS_DBOX_B_CREDIT(8); 7000 } 7001 7002 intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val); 7003 } 7004 7005 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) 7006 { 7007 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7008 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7009 7010 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe), 7011 HSW_LINETIME(crtc_state->linetime) | 7012 HSW_IPS_LINETIME(crtc_state->ips_linetime)); 7013 } 7014 7015 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) 7016 { 7017 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7018 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7019 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder); 7020 u32 val; 7021 7022 val = intel_de_read(dev_priv, reg); 7023 val &= ~HSW_FRAME_START_DELAY_MASK; 7024 val |= HSW_FRAME_START_DELAY(0); 7025 intel_de_write(dev_priv, reg, val); 7026 } 7027 7028 static void hsw_crtc_enable(struct intel_atomic_state 
*state, 7029 struct intel_crtc *crtc) 7030 { 7031 const struct intel_crtc_state *new_crtc_state = 7032 intel_atomic_get_new_crtc_state(state, crtc); 7033 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7034 enum pipe pipe = crtc->pipe, hsw_workaround_pipe; 7035 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 7036 bool psl_clkgate_wa; 7037 7038 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 7039 return; 7040 7041 intel_encoders_pre_pll_enable(state, crtc); 7042 7043 if (new_crtc_state->shared_dpll) 7044 intel_enable_shared_dpll(new_crtc_state); 7045 7046 intel_encoders_pre_enable(state, crtc); 7047 7048 if (!transcoder_is_dsi(cpu_transcoder)) 7049 intel_set_pipe_timings(new_crtc_state); 7050 7051 intel_set_pipe_src_size(new_crtc_state); 7052 7053 if (cpu_transcoder != TRANSCODER_EDP && 7054 !transcoder_is_dsi(cpu_transcoder)) 7055 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), 7056 new_crtc_state->pixel_multiplier - 1); 7057 7058 if (new_crtc_state->has_pch_encoder) 7059 intel_cpu_transcoder_set_m_n(new_crtc_state, 7060 &new_crtc_state->fdi_m_n, NULL); 7061 7062 if (!transcoder_is_dsi(cpu_transcoder)) { 7063 hsw_set_frame_start_delay(new_crtc_state); 7064 hsw_set_pipeconf(new_crtc_state); 7065 } 7066 7067 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 7068 bdw_set_pipemisc(new_crtc_state); 7069 7070 crtc->active = true; 7071 7072 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ 7073 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 7074 new_crtc_state->pch_pfit.enabled; 7075 if (psl_clkgate_wa) 7076 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 7077 7078 if (INTEL_GEN(dev_priv) >= 9) 7079 skl_pfit_enable(new_crtc_state); 7080 else 7081 ilk_pfit_enable(new_crtc_state); 7082 7083 /* 7084 * On ILK+ LUT must be loaded before the pipe is running but with 7085 * clocks enabled 7086 */ 7087 intel_color_load_luts(new_crtc_state); 7088 intel_color_commit(new_crtc_state); 7089 /* update DSPCNTR to configure gamma/csc for pipe bottom color */ 7090 if (INTEL_GEN(dev_priv) < 9) 7091 intel_disable_primary_plane(new_crtc_state); 7092 7093 hsw_set_linetime_wm(new_crtc_state); 7094 7095 if (INTEL_GEN(dev_priv) >= 11) 7096 icl_set_pipe_chicken(crtc); 7097 7098 if (dev_priv->display.initial_watermarks) 7099 dev_priv->display.initial_watermarks(state, crtc); 7100 7101 if (INTEL_GEN(dev_priv) >= 11) 7102 icl_pipe_mbus_enable(crtc); 7103 7104 intel_encoders_enable(state, crtc); 7105 7106 if (psl_clkgate_wa) { 7107 intel_wait_for_vblank(dev_priv, pipe); 7108 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); 7109 } 7110 7111 /* If we change the relative order between pipe/planes enabling, we need 7112 * to change the workaround. */ 7113 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; 7114 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 7115 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 7116 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 7117 } 7118 } 7119 7120 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) 7121 { 7122 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 7123 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7124 enum pipe pipe = crtc->pipe; 7125 7126 /* To avoid upsetting the power well on haswell only disable the pfit if 7127 * it's in use. The hw state code will make sure we get this right. 
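 * (On HSW the panel fitter sits in the power well, so poking PF_CTL
 * for an unused pfit risks an unclaimed register access while the
 * well is powered down.)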
*/ 7128 if (!old_crtc_state->pch_pfit.enabled) 7129 return; 7130 7131 intel_de_write(dev_priv, PF_CTL(pipe), 0); 7132 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0); 7133 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0); 7134 } 7135 7136 static void ilk_crtc_disable(struct intel_atomic_state *state, 7137 struct intel_crtc *crtc) 7138 { 7139 const struct intel_crtc_state *old_crtc_state = 7140 intel_atomic_get_old_crtc_state(state, crtc); 7141 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7142 enum pipe pipe = crtc->pipe; 7143 7144 /* 7145 * Sometimes spurious CPU pipe underruns happen when the 7146 * pipe is already disabled, but FDI RX/TX is still enabled. 7147 * Happens at least with VGA+HDMI cloning. Suppress them. 7148 */ 7149 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 7150 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 7151 7152 intel_encoders_disable(state, crtc); 7153 7154 intel_crtc_vblank_off(old_crtc_state); 7155 7156 intel_disable_pipe(old_crtc_state); 7157 7158 ilk_pfit_disable(old_crtc_state); 7159 7160 if (old_crtc_state->has_pch_encoder) 7161 ilk_fdi_disable(crtc); 7162 7163 intel_encoders_post_disable(state, crtc); 7164 7165 if (old_crtc_state->has_pch_encoder) { 7166 ilk_disable_pch_transcoder(dev_priv, pipe); 7167 7168 if (HAS_PCH_CPT(dev_priv)) { 7169 i915_reg_t reg; 7170 u32 temp; 7171 7172 /* disable TRANS_DP_CTL */ 7173 reg = TRANS_DP_CTL(pipe); 7174 temp = intel_de_read(dev_priv, reg); 7175 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 7176 TRANS_DP_PORT_SEL_MASK); 7177 temp |= TRANS_DP_PORT_SEL_NONE; 7178 intel_de_write(dev_priv, reg, temp); 7179 7180 /* disable DPLL_SEL */ 7181 temp = intel_de_read(dev_priv, PCH_DPLL_SEL); 7182 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 7183 intel_de_write(dev_priv, PCH_DPLL_SEL, temp); 7184 } 7185 7186 ilk_fdi_pll_disable(crtc); 7187 } 7188 7189 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 7190 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 7191 } 7192 7193 static void hsw_crtc_disable(struct intel_atomic_state *state, 7194 struct intel_crtc *crtc) 7195 { 7196 /* 7197 * FIXME collapse everything to one hook. 7198 * Need care with mst->ddi interactions. 7199 */ 7200 intel_encoders_disable(state, crtc); 7201 intel_encoders_post_disable(state, crtc); 7202 } 7203 7204 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 7205 { 7206 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7207 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7208 7209 if (!crtc_state->gmch_pfit.control) 7210 return; 7211 7212 /* 7213 * The panel fitter should only be adjusted whilst the pipe is disabled, 7214 * according to register description and PRM. 7215 */ 7216 drm_WARN_ON(&dev_priv->drm, 7217 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE); 7218 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); 7219 7220 intel_de_write(dev_priv, PFIT_PGM_RATIOS, 7221 crtc_state->gmch_pfit.pgm_ratios); 7222 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control); 7223 7224 /* Border color in case we don't scale up to the full screen. Black by 7225 * default, change to something else for debugging. 
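 * (BCLRPAT takes an RGB pattern; assuming the usual 8:8:8 layout, a
 * value like 0x00ff0000 would paint the border red.)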
	 */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}

bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;
	else if (IS_ROCKETLAKE(dev_priv))
		return phy <= PHY_D;
	else if (IS_ELKHARTLAKE(dev_priv))
		return phy <= PHY_C;
	else if (INTEL_GEN(dev_priv) >= 11)
		return phy <= PHY_B;
	else
		return false;
}

bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (IS_ROCKETLAKE(dev_priv))
		return false;
	else if (INTEL_GEN(dev_priv) >= 12)
		return phy >= PHY_D && phy <= PHY_I;
	else if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
		return phy >= PHY_C && phy <= PHY_F;
	else
		return false;
}

enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
	if (IS_ROCKETLAKE(i915) && port >= PORT_D)
		return (enum phy)port - 1;
	else if (IS_ELKHARTLAKE(i915) && port == PORT_D)
		return PHY_A;

	return (enum phy)port;
}

enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
		return PORT_TC_NONE;

	if (INTEL_GEN(dev_priv) >= 12)
		return port - PORT_D;

	return port - PORT_C;
}
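/*
 * Worked examples for the port -> phy/tc mappings above (illustrative,
 * derived purely from the arithmetic in intel_port_to_phy() and
 * intel_port_to_tc()): on RKL, PORT_D maps to the previous phy, i.e.
 * PHY_C; on EHL, PORT_D is routed to PHY_A. For the TC index, gen12
 * counts from PORT_D and gen11 from PORT_C, so PORT_D (gen12) and
 * PORT_C (gen11) both yield the first TC port.
 */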
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}

/*
 * Converts aux_ch to power_domain without caring about TBT ports;
 * for TBT ports use intel_aux_power_domain() instead.
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	default:
		MISSING_CASE(aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}

static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	return mask;
}

static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	old_domains = crtc->enabled_power_domains;
	crtc->enabled_power_domains = new_domains =
		get_crtc_power_domains(crtc_state);

	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	return old_domains & ~new_domains;
}
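/*
 * Illustrative example of the delta logic above: if the crtc previously
 * held {POWER_DOMAIN_PIPE_A, POWER_DOMAIN_AUDIO} and the new state needs
 * {POWER_DOMAIN_PIPE_A, POWER_DOMAIN_AUX_A}, only AUX_A is grabbed and
 * AUDIO is returned, for the caller to release again via
 * modeset_put_power_domains() once the modeset has completed.
 */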
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
}

static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}

static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, FP0(crtc->pipe),
		       crtc_state->dpll_hw_state.fp0);
	intel_de_write(dev_priv, FP1(crtc->pipe),
		       crtc_state->dpll_hw_state.fp1);
}

static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);
}

static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}

static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}

static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	u64 domains;
	int ret;

	if (!crtc_state->hw.active)
		return;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	domains = crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	crtc->enabled_power_domains = 0;

	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}

/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		dev_priv->modeset_restore_state = state;
	return ret;
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/*
 * Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency).
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
				"connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
				"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
				"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
				"best encoder set without crtc!\n");
	}
}

static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}

#define RETRY 1
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/*
	 * FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
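	/*
	 * Illustrative arithmetic (not from bspec): 2.7 GHz with 8b/10b
	 * encoding leaves ~2.16 Gbps of payload per lane, so e.g. a
	 * 148500 kHz mode at 24 bpp carries 148500 * 24 = ~3.56 Gbps
	 * and therefore needs two FDI lanes.
	 */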
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}

bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	if (!dev_priv->params.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * We should measure whether using a lower cdclk w/o IPS would
	 * be a net win.
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}

static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return INTEL_GEN(dev_priv) < 4 &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock;
	unsigned int pipe_w, pipe_h, pfit_w, pfit_h;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	pipe_w = crtc_state->pipe_src_w;
	pipe_h = crtc_state->pipe_src_h;

	pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
	pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);

	if (pipe_w < pfit_w)
		pipe_w = pfit_w;
	if (pipe_h < pfit_h)
		pipe_h = pfit_h;

	if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
			!pfit_w || !pfit_h))
		return pixel_rate;

	return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
		       pfit_w * pfit_h);
}
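/*
 * Illustrative example of the scaling above: downscaling a 3840x2160
 * source into a 1920x1080 panel fitter window multiplies the pixel rate
 * by (3840*2160)/(1920*1080) = 4, since the pipe still has to fetch the
 * full source resolution. When upscaling, the max() clamping above makes
 * the ratio 1 and the rate is left untouched.
 */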
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (HAS_GMCH(dev_priv))
		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
		crtc_state->pixel_rate =
			crtc_state->hw.adjusted_mode.crtc_clock;
	else
		crtc_state->pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);
}

static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    adjusted_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	    pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/*
	 * Cantiga+ cannot handle modes with an hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}

static void
intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void compute_m_n(unsigned int m, unsigned int n,
			u32 *ret_m, u32 *ret_n,
			bool constant_n)
{
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Give N value as 0x8000, which
	 * should be acceptable to these devices. 0x8000 is the
	 * specified fixed N value for asynchronous clock mode,
	 * which the devices expect also in synchronous clock mode.
	 */
	if (constant_n)
		*ret_n = 0x8000;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
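/*
 * Worked example (illustrative), as used by intel_link_compute_m_n()
 * below: a 148500 kHz mode at 24 bpp over 4 lanes of 2.7 GHz DP gives
 * data_clock = 24 * 148500 = 3564000 against a link budget of
 * 270000 * 4 * 8 = 8640000. With constant_n the result is
 * gmch_m = 3564000 * 0x8000 / 8640000 = 13516, gmch_n = 0x8000,
 * i.e. a data M/N ratio of ~0.4125.
 */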
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}

static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}

static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (dev_priv->params.panel_use_ssc >= 0)
		return dev_priv->params.panel_use_ssc != 0;
	return dev_priv->vbt.lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

static u32 pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
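/*
 * Example of the FP encodings above (illustrative): with n = 2, m1 = 10
 * and m2 = 8 the i9xx layout packs to 0x00020a08, while the pnv variant
 * stores n one-hot, so n = 2, m2 = 8 becomes (1 << 2) << 16 | 8 =
 * 0x00040008.
 */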
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev_priv)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}

static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}

static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
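/*
 * Note on the register layout used above: the TU size is stored biased
 * by one via TU_SIZE(), so the usual tu == 64 is written as 63 into the
 * high bits of the DATA_M register, alongside gmch_m in the low bits
 * (illustrative; see the TU_SIZE() definition for the exact field
 * placement).
 */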
static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
				 enum transcoder transcoder)
{
	if (IS_HASWELL(dev_priv))
		return transcoder == TRANSCODER_EDP;

	/*
	 * Strictly speaking some registers are available before
	 * gen7, but we only support DRRS on gen7+
	 */
	return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
}

static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}

void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {
		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}

static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
			DPLL_EXT_BUFFER_ENABLE_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}

static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}

static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll &
		       ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
			       0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
			       0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}

static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
		       5 << DPIO_CHV_S1_DIV_SHIFT |
		       bestp1 << DPIO_CHV_P1_DIV_SHIFT |
		       bestp2 << DPIO_CHV_P2_DIV_SHIFT |
		       1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
		       DPIO_CHV_M1_DIV_BY_2 |
		       1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
		       vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
		       DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}

/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
		     const struct dpll *dpll)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_crtc_state *pipe_config;

	pipe_config = intel_crtc_state_alloc(crtc);
	if (!pipe_config)
		return -ENOMEM;

	pipe_config->cpu_transcoder = (enum transcoder)pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll = *dpll;

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_compute_dpll(crtc, pipe_config);
		chv_prepare_pll(crtc, pipe_config);
		chv_enable_pll(crtc, pipe_config);
	} else {
		vlv_compute_dpll(crtc, pipe_config);
		vlv_prepare_pll(crtc, pipe_config);
		vlv_enable_pll(crtc, pipe_config);
	}

	kfree(pipe_config);

	return 0;
}

/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. To be used in cases where the PLL was
 * previously force-enabled with vlv_force_pll_on().
 */
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	if (IS_CHERRYVIEW(dev_priv))
		chv_disable_pll(dev_priv, pipe);
	else
		vlv_disable_pll(dev_priv, pipe);
}

static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;

		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
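/*
 * Worked example for the p1 encoding above (illustrative): the P1 post
 * divider is stored as a one-hot bitmask, so p1 == 2 sets bit
 * (p1 - 1) == 1, i.e. the value 0x2 before the field shift is applied.
 */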
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to "1" in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity we keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}

static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/*
	 * We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch.
	 */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}
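	/*
	 * Illustrative example: for a 1920x1080i mode with htotal 2200
	 * and hsync start 2008, the non-SDVO case above yields
	 * vsyncshift = 2008 - 2200 / 2 = 908.
	 */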
	if (INTEL_GEN(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/*
	 * Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits.
	 */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
}

static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
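	/*
	 * Example of the encoding below (illustrative): a 1920x1080
	 * source is written as (1919 << 16) | 1079 = 0x077f0437.
	 */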
	intel_de_write(dev_priv, PIPESRC(pipe),
		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
}

static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (IS_GEN(dev_priv, 2))
		return false;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
	else
		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
}

static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
			(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
			((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
			(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
			((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
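/*
 * The readback above undoes the "minus one" encoding of the timing
 * registers, e.g. an HTOTAL value of 0x0897077f decodes back to
 * htotal = 0x897 + 1 = 2200 and hdisplay = 0x77f + 1 = 1920
 * (illustrative).
 */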
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
}

void intel_mode_from_pipe_config(struct drm_display_mode *mode,
				 struct intel_crtc_state *pipe_config)
{
	mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
	mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
	mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
	mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;

	mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
	mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
	mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
	mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;

	mode->flags = pipe_config->hw.adjusted_mode.flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;

	drm_mode_set_name(mode);
}

static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}

static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 48000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		limit = &intel_limits_i8xx_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
		limit = &intel_limits_i8xx_dvo;
	} else {
		limit = &intel_limits_i8xx_dac;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
"Couldn't find PLL settings for mode!\n"); 9013 return -EINVAL; 9014 } 9015 9016 i8xx_compute_dpll(crtc, crtc_state, NULL); 9017 9018 return 0; 9019 } 9020 9021 static int g4x_crtc_compute_clock(struct intel_crtc *crtc, 9022 struct intel_crtc_state *crtc_state) 9023 { 9024 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9025 const struct intel_limit *limit; 9026 int refclk = 96000; 9027 9028 memset(&crtc_state->dpll_hw_state, 0, 9029 sizeof(crtc_state->dpll_hw_state)); 9030 9031 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9032 if (intel_panel_use_ssc(dev_priv)) { 9033 refclk = dev_priv->vbt.lvds_ssc_freq; 9034 drm_dbg_kms(&dev_priv->drm, 9035 "using SSC reference clock of %d kHz\n", 9036 refclk); 9037 } 9038 9039 if (intel_is_dual_link_lvds(dev_priv)) 9040 limit = &intel_limits_g4x_dual_channel_lvds; 9041 else 9042 limit = &intel_limits_g4x_single_channel_lvds; 9043 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 9044 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 9045 limit = &intel_limits_g4x_hdmi; 9046 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 9047 limit = &intel_limits_g4x_sdvo; 9048 } else { 9049 /* The option is for other outputs */ 9050 limit = &intel_limits_i9xx_sdvo; 9051 } 9052 9053 if (!crtc_state->clock_set && 9054 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9055 refclk, NULL, &crtc_state->dpll)) { 9056 drm_err(&dev_priv->drm, 9057 "Couldn't find PLL settings for mode!\n"); 9058 return -EINVAL; 9059 } 9060 9061 i9xx_compute_dpll(crtc, crtc_state, NULL); 9062 9063 return 0; 9064 } 9065 9066 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 9067 struct intel_crtc_state *crtc_state) 9068 { 9069 struct drm_device *dev = crtc->base.dev; 9070 struct drm_i915_private *dev_priv = to_i915(dev); 9071 const struct intel_limit *limit; 9072 int refclk = 96000; 9073 9074 memset(&crtc_state->dpll_hw_state, 0, 9075 sizeof(crtc_state->dpll_hw_state)); 9076 9077 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9078 if (intel_panel_use_ssc(dev_priv)) { 9079 refclk = dev_priv->vbt.lvds_ssc_freq; 9080 drm_dbg_kms(&dev_priv->drm, 9081 "using SSC reference clock of %d kHz\n", 9082 refclk); 9083 } 9084 9085 limit = &pnv_limits_lvds; 9086 } else { 9087 limit = &pnv_limits_sdvo; 9088 } 9089 9090 if (!crtc_state->clock_set && 9091 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9092 refclk, NULL, &crtc_state->dpll)) { 9093 drm_err(&dev_priv->drm, 9094 "Couldn't find PLL settings for mode!\n"); 9095 return -EINVAL; 9096 } 9097 9098 i9xx_compute_dpll(crtc, crtc_state, NULL); 9099 9100 return 0; 9101 } 9102 9103 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 9104 struct intel_crtc_state *crtc_state) 9105 { 9106 struct drm_device *dev = crtc->base.dev; 9107 struct drm_i915_private *dev_priv = to_i915(dev); 9108 const struct intel_limit *limit; 9109 int refclk = 96000; 9110 9111 memset(&crtc_state->dpll_hw_state, 0, 9112 sizeof(crtc_state->dpll_hw_state)); 9113 9114 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9115 if (intel_panel_use_ssc(dev_priv)) { 9116 refclk = dev_priv->vbt.lvds_ssc_freq; 9117 drm_dbg_kms(&dev_priv->drm, 9118 "using SSC reference clock of %d kHz\n", 9119 refclk); 9120 } 9121 9122 limit = &intel_limits_i9xx_lvds; 9123 } else { 9124 limit = &intel_limits_i9xx_sdvo; 9125 } 9126 9127 if (!crtc_state->clock_set && 9128 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9129 refclk, NULL, &crtc_state->dpll)) { 9130 
drm_err(&dev_priv->drm, 9131 "Couldn't find PLL settings for mode!\n"); 9132 return -EINVAL; 9133 } 9134 9135 i9xx_compute_dpll(crtc, crtc_state, NULL); 9136 9137 return 0; 9138 } 9139 9140 static int chv_crtc_compute_clock(struct intel_crtc *crtc, 9141 struct intel_crtc_state *crtc_state) 9142 { 9143 int refclk = 100000; 9144 const struct intel_limit *limit = &intel_limits_chv; 9145 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 9146 9147 memset(&crtc_state->dpll_hw_state, 0, 9148 sizeof(crtc_state->dpll_hw_state)); 9149 9150 if (!crtc_state->clock_set && 9151 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9152 refclk, NULL, &crtc_state->dpll)) { 9153 drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n"); 9154 return -EINVAL; 9155 } 9156 9157 chv_compute_dpll(crtc, crtc_state); 9158 9159 return 0; 9160 } 9161 9162 static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 9163 struct intel_crtc_state *crtc_state) 9164 { 9165 int refclk = 100000; 9166 const struct intel_limit *limit = &intel_limits_vlv; 9167 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 9168 9169 memset(&crtc_state->dpll_hw_state, 0, 9170 sizeof(crtc_state->dpll_hw_state)); 9171 9172 if (!crtc_state->clock_set && 9173 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9174 refclk, NULL, &crtc_state->dpll)) { 9175 drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n"); 9176 return -EINVAL; 9177 } 9178 9179 vlv_compute_dpll(crtc, crtc_state); 9180 9181 return 0; 9182 } 9183 9184 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 9185 { 9186 if (IS_I830(dev_priv)) 9187 return false; 9188 9189 return INTEL_GEN(dev_priv) >= 4 || 9190 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 9191 } 9192 9193 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) 9194 { 9195 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9196 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9197 u32 tmp; 9198 9199 if (!i9xx_has_pfit(dev_priv)) 9200 return; 9201 9202 tmp = intel_de_read(dev_priv, PFIT_CONTROL); 9203 if (!(tmp & PFIT_ENABLE)) 9204 return; 9205 9206 /* Check whether the pfit is attached to our pipe. 
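	 * On gen < 4 the fitter is hardwired to pipe B, while gen4+ exposes
	 * the attached pipe in PFIT_PIPE_MASK; both checks follow below.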
*/ 9207 if (INTEL_GEN(dev_priv) < 4) { 9208 if (crtc->pipe != PIPE_B) 9209 return; 9210 } else { 9211 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 9212 return; 9213 } 9214 9215 crtc_state->gmch_pfit.control = tmp; 9216 crtc_state->gmch_pfit.pgm_ratios = 9217 intel_de_read(dev_priv, PFIT_PGM_RATIOS); 9218 } 9219 9220 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 9221 struct intel_crtc_state *pipe_config) 9222 { 9223 struct drm_device *dev = crtc->base.dev; 9224 struct drm_i915_private *dev_priv = to_i915(dev); 9225 enum pipe pipe = crtc->pipe; 9226 struct dpll clock; 9227 u32 mdiv; 9228 int refclk = 100000; 9229 9230 /* In case of DSI, DPLL will not be used */ 9231 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 9232 return; 9233 9234 vlv_dpio_get(dev_priv); 9235 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 9236 vlv_dpio_put(dev_priv); 9237 9238 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 9239 clock.m2 = mdiv & DPIO_M2DIV_MASK; 9240 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 9241 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 9242 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 9243 9244 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 9245 } 9246 9247 static void 9248 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 9249 struct intel_initial_plane_config *plane_config) 9250 { 9251 struct drm_device *dev = crtc->base.dev; 9252 struct drm_i915_private *dev_priv = to_i915(dev); 9253 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 9254 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 9255 enum pipe pipe; 9256 u32 val, base, offset; 9257 int fourcc, pixel_format; 9258 unsigned int aligned_height; 9259 struct drm_framebuffer *fb; 9260 struct intel_framebuffer *intel_fb; 9261 9262 if (!plane->get_hw_state(plane, &pipe)) 9263 return; 9264 9265 drm_WARN_ON(dev, pipe != crtc->pipe); 9266 9267 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9268 if (!intel_fb) { 9269 drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); 9270 return; 9271 } 9272 9273 fb = &intel_fb->base; 9274 9275 fb->dev = dev; 9276 9277 val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); 9278 9279 if (INTEL_GEN(dev_priv) >= 4) { 9280 if (val & DISPPLANE_TILED) { 9281 plane_config->tiling = I915_TILING_X; 9282 fb->modifier = I915_FORMAT_MOD_X_TILED; 9283 } 9284 9285 if (val & DISPPLANE_ROTATE_180) 9286 plane_config->rotation = DRM_MODE_ROTATE_180; 9287 } 9288 9289 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && 9290 val & DISPPLANE_MIRROR) 9291 plane_config->rotation |= DRM_MODE_REFLECT_X; 9292 9293 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 9294 fourcc = i9xx_format_to_fourcc(pixel_format); 9295 fb->format = drm_format_info(fourcc); 9296 9297 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 9298 offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane)); 9299 base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; 9300 } else if (INTEL_GEN(dev_priv) >= 4) { 9301 if (plane_config->tiling) 9302 offset = intel_de_read(dev_priv, 9303 DSPTILEOFF(i9xx_plane)); 9304 else 9305 offset = intel_de_read(dev_priv, 9306 DSPLINOFF(i9xx_plane)); 9307 base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; 9308 } else { 9309 base = intel_de_read(dev_priv, DSPADDR(i9xx_plane)); 9310 } 9311 plane_config->base = base; 9312 9313 val = intel_de_read(dev_priv, PIPESRC(pipe)); 9314 fb->width = ((val >> 16) & 0xfff) + 1; 9315 fb->height = ((val >> 0) & 0xfff) + 1; 9316 9317 val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane)); 9318 fb->pitches[0] = val 
& 0xffffffc0; 9319 9320 aligned_height = intel_fb_align_height(fb, 0, fb->height); 9321 9322 plane_config->size = fb->pitches[0] * aligned_height; 9323 9324 drm_dbg_kms(&dev_priv->drm, 9325 "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 9326 crtc->base.name, plane->base.name, fb->width, fb->height, 9327 fb->format->cpp[0] * 8, base, fb->pitches[0], 9328 plane_config->size); 9329 9330 plane_config->fb = intel_fb; 9331 } 9332 9333 static void chv_crtc_clock_get(struct intel_crtc *crtc, 9334 struct intel_crtc_state *pipe_config) 9335 { 9336 struct drm_device *dev = crtc->base.dev; 9337 struct drm_i915_private *dev_priv = to_i915(dev); 9338 enum pipe pipe = crtc->pipe; 9339 enum dpio_channel port = vlv_pipe_to_channel(pipe); 9340 struct dpll clock; 9341 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 9342 int refclk = 100000; 9343 9344 /* In case of DSI, DPLL will not be used */ 9345 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 9346 return; 9347 9348 vlv_dpio_get(dev_priv); 9349 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 9350 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 9351 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 9352 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 9353 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 9354 vlv_dpio_put(dev_priv); 9355 9356 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 9357 clock.m2 = (pll_dw0 & 0xff) << 22; 9358 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 9359 clock.m2 |= pll_dw2 & 0x3fffff; 9360 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 9361 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 9362 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 9363 9364 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 9365 } 9366 9367 static enum intel_output_format 9368 bdw_get_pipemisc_output_format(struct intel_crtc *crtc) 9369 { 9370 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9371 u32 tmp; 9372 9373 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); 9374 9375 if (tmp & PIPEMISC_YUV420_ENABLE) { 9376 /* We support 4:2:0 in full blend mode only */ 9377 drm_WARN_ON(&dev_priv->drm, 9378 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); 9379 9380 return INTEL_OUTPUT_FORMAT_YCBCR420; 9381 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { 9382 return INTEL_OUTPUT_FORMAT_YCBCR444; 9383 } else { 9384 return INTEL_OUTPUT_FORMAT_RGB; 9385 } 9386 } 9387 9388 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) 9389 { 9390 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 9391 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 9392 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9393 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 9394 u32 tmp; 9395 9396 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); 9397 9398 if (tmp & DISPPLANE_GAMMA_ENABLE) 9399 crtc_state->gamma_enable = true; 9400 9401 if (!HAS_GMCH(dev_priv) && 9402 tmp & DISPPLANE_PIPE_CSC_ENABLE) 9403 crtc_state->csc_enable = true; 9404 } 9405 9406 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 9407 struct intel_crtc_state *pipe_config) 9408 { 9409 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9410 enum intel_display_power_domain power_domain; 9411 intel_wakeref_t wakeref; 9412 u32 tmp; 9413 bool ret; 9414 9415 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9416 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 9417 if (!wakeref) 9418 return 
false; 9419 9420 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 9421 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9422 pipe_config->shared_dpll = NULL; 9423 9424 ret = false; 9425 9426 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); 9427 if (!(tmp & PIPECONF_ENABLE)) 9428 goto out; 9429 9430 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 9431 IS_CHERRYVIEW(dev_priv)) { 9432 switch (tmp & PIPECONF_BPC_MASK) { 9433 case PIPECONF_6BPC: 9434 pipe_config->pipe_bpp = 18; 9435 break; 9436 case PIPECONF_8BPC: 9437 pipe_config->pipe_bpp = 24; 9438 break; 9439 case PIPECONF_10BPC: 9440 pipe_config->pipe_bpp = 30; 9441 break; 9442 default: 9443 break; 9444 } 9445 } 9446 9447 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 9448 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 9449 pipe_config->limited_color_range = true; 9450 9451 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> 9452 PIPECONF_GAMMA_MODE_SHIFT; 9453 9454 if (IS_CHERRYVIEW(dev_priv)) 9455 pipe_config->cgm_mode = intel_de_read(dev_priv, 9456 CGM_PIPE_MODE(crtc->pipe)); 9457 9458 i9xx_get_pipe_color_config(pipe_config); 9459 intel_color_get_config(pipe_config); 9460 9461 if (INTEL_GEN(dev_priv) < 4) 9462 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 9463 9464 intel_get_pipe_timings(crtc, pipe_config); 9465 intel_get_pipe_src_size(crtc, pipe_config); 9466 9467 i9xx_get_pfit_config(pipe_config); 9468 9469 if (INTEL_GEN(dev_priv) >= 4) { 9470 /* No way to read it out on pipes B and C */ 9471 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 9472 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 9473 else 9474 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe)); 9475 pipe_config->pixel_multiplier = 9476 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 9477 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 9478 pipe_config->dpll_hw_state.dpll_md = tmp; 9479 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 9480 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 9481 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe)); 9482 pipe_config->pixel_multiplier = 9483 ((tmp & SDVO_MULTIPLIER_MASK) 9484 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 9485 } else { 9486 /* Note that on i915G/GM the pixel multiplier is in the sdvo 9487 * port and will be fixed up in the encoder->get_config 9488 * function. */ 9489 pipe_config->pixel_multiplier = 1; 9490 } 9491 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv, 9492 DPLL(crtc->pipe)); 9493 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 9494 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv, 9495 FP0(crtc->pipe)); 9496 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv, 9497 FP1(crtc->pipe)); 9498 } else { 9499 /* Mask out read-only status bits. */ 9500 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 9501 DPLL_PORTC_READY_MASK | 9502 DPLL_PORTB_READY_MASK); 9503 } 9504 9505 if (IS_CHERRYVIEW(dev_priv)) 9506 chv_crtc_clock_get(crtc, pipe_config); 9507 else if (IS_VALLEYVIEW(dev_priv)) 9508 vlv_crtc_clock_get(crtc, pipe_config); 9509 else 9510 i9xx_crtc_clock_get(crtc, pipe_config); 9511 9512 /* 9513 * Normally the dotclock is filled in by the encoder .get_config() 9514 * but in case the pipe is enabled w/o any ports we need a sane 9515 * default. 
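	 * That fallback is computed below as port_clock / pixel_multiplier.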
9516 */ 9517 pipe_config->hw.adjusted_mode.crtc_clock = 9518 pipe_config->port_clock / pipe_config->pixel_multiplier; 9519 9520 ret = true; 9521 9522 out: 9523 intel_display_power_put(dev_priv, power_domain, wakeref); 9524 9525 return ret; 9526 } 9527 9528 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) 9529 { 9530 struct intel_encoder *encoder; 9531 int i; 9532 u32 val, final; 9533 bool has_lvds = false; 9534 bool has_cpu_edp = false; 9535 bool has_panel = false; 9536 bool has_ck505 = false; 9537 bool can_ssc = false; 9538 bool using_ssc_source = false; 9539 9540 /* We need to take the global config into account */ 9541 for_each_intel_encoder(&dev_priv->drm, encoder) { 9542 switch (encoder->type) { 9543 case INTEL_OUTPUT_LVDS: 9544 has_panel = true; 9545 has_lvds = true; 9546 break; 9547 case INTEL_OUTPUT_EDP: 9548 has_panel = true; 9549 if (encoder->port == PORT_A) 9550 has_cpu_edp = true; 9551 break; 9552 default: 9553 break; 9554 } 9555 } 9556 9557 if (HAS_PCH_IBX(dev_priv)) { 9558 has_ck505 = dev_priv->vbt.display_clock_mode; 9559 can_ssc = has_ck505; 9560 } else { 9561 has_ck505 = false; 9562 can_ssc = true; 9563 } 9564 9565 /* Check if any DPLLs are using the SSC source */ 9566 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { 9567 u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); 9568 9569 if (!(temp & DPLL_VCO_ENABLE)) 9570 continue; 9571 9572 if ((temp & PLL_REF_INPUT_MASK) == 9573 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 9574 using_ssc_source = true; 9575 break; 9576 } 9577 } 9578 9579 drm_dbg_kms(&dev_priv->drm, 9580 "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 9581 has_panel, has_lvds, has_ck505, using_ssc_source); 9582 9583 /* Ironlake: try to setup display ref clock before DPLL 9584 * enabling. This is only under driver's control after 9585 * PCH B stepping, previous chipset stepping should be 9586 * ignoring this setting. 9587 */ 9588 val = intel_de_read(dev_priv, PCH_DREF_CONTROL); 9589 9590 /* As we must carefully and slowly disable/enable each source in turn, 9591 * compute the final state we want first and check if we need to 9592 * make any changes at all. 
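	 * ('final' is the value PCH_DREF_CONTROL should end up with; if it
	 * already matches the current register value we return early below.)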
9593 */ 9594 final = val; 9595 final &= ~DREF_NONSPREAD_SOURCE_MASK; 9596 if (has_ck505) 9597 final |= DREF_NONSPREAD_CK505_ENABLE; 9598 else 9599 final |= DREF_NONSPREAD_SOURCE_ENABLE; 9600 9601 final &= ~DREF_SSC_SOURCE_MASK; 9602 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9603 final &= ~DREF_SSC1_ENABLE; 9604 9605 if (has_panel) { 9606 final |= DREF_SSC_SOURCE_ENABLE; 9607 9608 if (intel_panel_use_ssc(dev_priv) && can_ssc) 9609 final |= DREF_SSC1_ENABLE; 9610 9611 if (has_cpu_edp) { 9612 if (intel_panel_use_ssc(dev_priv) && can_ssc) 9613 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9614 else 9615 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9616 } else 9617 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9618 } else if (using_ssc_source) { 9619 final |= DREF_SSC_SOURCE_ENABLE; 9620 final |= DREF_SSC1_ENABLE; 9621 } 9622 9623 if (final == val) 9624 return; 9625 9626 /* Always enable nonspread source */ 9627 val &= ~DREF_NONSPREAD_SOURCE_MASK; 9628 9629 if (has_ck505) 9630 val |= DREF_NONSPREAD_CK505_ENABLE; 9631 else 9632 val |= DREF_NONSPREAD_SOURCE_ENABLE; 9633 9634 if (has_panel) { 9635 val &= ~DREF_SSC_SOURCE_MASK; 9636 val |= DREF_SSC_SOURCE_ENABLE; 9637 9638 /* SSC must be turned on before enabling the CPU output */ 9639 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9640 drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); 9641 val |= DREF_SSC1_ENABLE; 9642 } else 9643 val &= ~DREF_SSC1_ENABLE; 9644 9645 /* Get SSC going before enabling the outputs */ 9646 intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 9647 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 9648 udelay(200); 9649 9650 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9651 9652 /* Enable CPU source on CPU attached eDP */ 9653 if (has_cpu_edp) { 9654 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 9655 drm_dbg_kms(&dev_priv->drm, 9656 "Using SSC on eDP\n"); 9657 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 9658 } else 9659 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 9660 } else 9661 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9662 9663 intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 9664 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 9665 udelay(200); 9666 } else { 9667 drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n"); 9668 9669 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 9670 9671 /* Turn off CPU output */ 9672 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 9673 9674 intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 9675 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 9676 udelay(200); 9677 9678 if (!using_ssc_source) { 9679 drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n"); 9680 9681 /* Turn off the SSC source */ 9682 val &= ~DREF_SSC_SOURCE_MASK; 9683 val |= DREF_SSC_SOURCE_DISABLE; 9684 9685 /* Turn off SSC1 */ 9686 val &= ~DREF_SSC1_ENABLE; 9687 9688 intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 9689 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 9690 udelay(200); 9691 } 9692 } 9693 9694 BUG_ON(val != final); 9695 } 9696 9697 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 9698 { 9699 u32 tmp; 9700 9701 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); 9702 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 9703 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); 9704 9705 if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & 9706 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 9707 drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); 9708 9709 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); 9710 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 9711 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); 9712 9713 if (wait_for_us((intel_de_read(dev_priv, 
SOUTH_CHICKEN2) & 9714 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 9715 drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n"); 9716 } 9717 9718 /* WaMPhyProgramming:hsw */ 9719 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 9720 { 9721 u32 tmp; 9722 9723 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 9724 tmp &= ~(0xFF << 24); 9725 tmp |= (0x12 << 24); 9726 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 9727 9728 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 9729 tmp |= (1 << 11); 9730 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 9731 9732 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 9733 tmp |= (1 << 11); 9734 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 9735 9736 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 9737 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9738 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 9739 9740 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 9741 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 9742 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 9743 9744 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 9745 tmp &= ~(7 << 13); 9746 tmp |= (5 << 13); 9747 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 9748 9749 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 9750 tmp &= ~(7 << 13); 9751 tmp |= (5 << 13); 9752 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 9753 9754 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 9755 tmp &= ~0xFF; 9756 tmp |= 0x1C; 9757 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 9758 9759 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 9760 tmp &= ~0xFF; 9761 tmp |= 0x1C; 9762 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 9763 9764 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 9765 tmp &= ~(0xFF << 16); 9766 tmp |= (0x1C << 16); 9767 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 9768 9769 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 9770 tmp &= ~(0xFF << 16); 9771 tmp |= (0x1C << 16); 9772 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 9773 9774 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 9775 tmp |= (1 << 27); 9776 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 9777 9778 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 9779 tmp |= (1 << 27); 9780 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 9781 9782 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 9783 tmp &= ~(0xF << 28); 9784 tmp |= (4 << 28); 9785 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 9786 9787 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 9788 tmp &= ~(0xF << 28); 9789 tmp |= (4 << 28); 9790 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 9791 } 9792 9793 /* Implements 3 different sequences from BSpec chapter "Display iCLK 9794 * Programming" based on the parameters passed: 9795 * - Sequence to enable CLKOUT_DP 9796 * - Sequence to enable CLKOUT_DP without spread 9797 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 9798 */ 9799 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 9800 bool with_spread, bool with_fdi) 9801 { 9802 u32 reg, tmp; 9803 9804 if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, 9805 "FDI requires downspread\n")) 9806 with_spread = true; 9807 if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && 9808 with_fdi, "LP PCH doesn't have FDI\n")) 9809 with_fdi = false; 9810 9811 mutex_lock(&dev_priv->sb_lock); 9812 9813 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9814 tmp &= ~SBI_SSCCTL_DISABLE; 9815 tmp |= SBI_SSCCTL_PATHALT; 9816 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9817 9818 
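	/*
	 * At this point SSC is ungated (~SBI_SSCCTL_DISABLE) but the clock
	 * path is still parked in bypass (SBI_SSCCTL_PATHALT); the delay
	 * below presumably lets it settle before it is optionally unparked.
	 */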
udelay(24); 9819 9820 if (with_spread) { 9821 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9822 tmp &= ~SBI_SSCCTL_PATHALT; 9823 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9824 9825 if (with_fdi) { 9826 lpt_reset_fdi_mphy(dev_priv); 9827 lpt_program_fdi_mphy(dev_priv); 9828 } 9829 } 9830 9831 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 9832 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9833 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9834 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9835 9836 mutex_unlock(&dev_priv->sb_lock); 9837 } 9838 9839 /* Sequence to disable CLKOUT_DP */ 9840 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 9841 { 9842 u32 reg, tmp; 9843 9844 mutex_lock(&dev_priv->sb_lock); 9845 9846 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 9847 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 9848 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 9849 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 9850 9851 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 9852 if (!(tmp & SBI_SSCCTL_DISABLE)) { 9853 if (!(tmp & SBI_SSCCTL_PATHALT)) { 9854 tmp |= SBI_SSCCTL_PATHALT; 9855 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9856 udelay(32); 9857 } 9858 tmp |= SBI_SSCCTL_DISABLE; 9859 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 9860 } 9861 9862 mutex_unlock(&dev_priv->sb_lock); 9863 } 9864 9865 #define BEND_IDX(steps) ((50 + (steps)) / 5) 9866 9867 static const u16 sscdivintphase[] = { 9868 [BEND_IDX( 50)] = 0x3B23, 9869 [BEND_IDX( 45)] = 0x3B23, 9870 [BEND_IDX( 40)] = 0x3C23, 9871 [BEND_IDX( 35)] = 0x3C23, 9872 [BEND_IDX( 30)] = 0x3D23, 9873 [BEND_IDX( 25)] = 0x3D23, 9874 [BEND_IDX( 20)] = 0x3E23, 9875 [BEND_IDX( 15)] = 0x3E23, 9876 [BEND_IDX( 10)] = 0x3F23, 9877 [BEND_IDX( 5)] = 0x3F23, 9878 [BEND_IDX( 0)] = 0x0025, 9879 [BEND_IDX( -5)] = 0x0025, 9880 [BEND_IDX(-10)] = 0x0125, 9881 [BEND_IDX(-15)] = 0x0125, 9882 [BEND_IDX(-20)] = 0x0225, 9883 [BEND_IDX(-25)] = 0x0225, 9884 [BEND_IDX(-30)] = 0x0325, 9885 [BEND_IDX(-35)] = 0x0325, 9886 [BEND_IDX(-40)] = 0x0425, 9887 [BEND_IDX(-45)] = 0x0425, 9888 [BEND_IDX(-50)] = 0x0525, 9889 }; 9890 9891 /* 9892 * Bend CLKOUT_DP 9893 * steps -50 to 50 inclusive, in steps of 5 9894 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 9895 * change in clock period = -(steps / 10) * 5.787 ps 9896 */ 9897 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 9898 { 9899 u32 tmp; 9900 int idx = BEND_IDX(steps); 9901 9902 if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) 9903 return; 9904 9905 if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) 9906 return; 9907 9908 mutex_lock(&dev_priv->sb_lock); 9909 9910 if (steps % 10 != 0) 9911 tmp = 0xAAAAAAAB; 9912 else 9913 tmp = 0x00000000; 9914 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 9915 9916 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 9917 tmp &= 0xffff0000; 9918 tmp |= sscdivintphase[idx]; 9919 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 9920 9921 mutex_unlock(&dev_priv->sb_lock); 9922 } 9923 9924 #undef BEND_IDX 9925 9926 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) 9927 { 9928 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); 9929 u32 ctl = intel_de_read(dev_priv, SPLL_CTL); 9930 9931 if ((ctl & SPLL_PLL_ENABLE) == 0) 9932 return false; 9933 9934 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && 9935 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9936 return true; 9937 9938 if (IS_BROADWELL(dev_priv) && 9939 (ctl 
& SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) 9940 return true; 9941 9942 return false; 9943 } 9944 9945 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, 9946 enum intel_dpll_id id) 9947 { 9948 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); 9949 u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); 9950 9951 if ((ctl & WRPLL_PLL_ENABLE) == 0) 9952 return false; 9953 9954 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) 9955 return true; 9956 9957 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && 9958 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && 9959 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 9960 return true; 9961 9962 return false; 9963 } 9964 9965 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 9966 { 9967 struct intel_encoder *encoder; 9968 bool has_fdi = false; 9969 9970 for_each_intel_encoder(&dev_priv->drm, encoder) { 9971 switch (encoder->type) { 9972 case INTEL_OUTPUT_ANALOG: 9973 has_fdi = true; 9974 break; 9975 default: 9976 break; 9977 } 9978 } 9979 9980 /* 9981 * The BIOS may have decided to use the PCH SSC 9982 * reference so we must not disable it until the 9983 * relevant PLLs have stopped relying on it. We'll 9984 * just leave the PCH SSC reference enabled in case 9985 * any active PLL is using it. It will get disabled 9986 * after runtime suspend if we don't have FDI. 9987 * 9988 * TODO: Move the whole reference clock handling 9989 * to the modeset sequence proper so that we can 9990 * actually enable/disable/reconfigure these things 9991 * safely. To do that we need to introduce a real 9992 * clock hierarchy. That would also allow us to do 9993 * clock bending finally. 9994 */ 9995 dev_priv->pch_ssc_use = 0; 9996 9997 if (spll_uses_pch_ssc(dev_priv)) { 9998 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); 9999 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); 10000 } 10001 10002 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { 10003 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); 10004 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); 10005 } 10006 10007 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { 10008 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); 10009 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); 10010 } 10011 10012 if (dev_priv->pch_ssc_use) 10013 return; 10014 10015 if (has_fdi) { 10016 lpt_bend_clkout_dp(dev_priv, 0); 10017 lpt_enable_clkout_dp(dev_priv, true, true); 10018 } else { 10019 lpt_disable_clkout_dp(dev_priv); 10020 } 10021 } 10022 10023 /* 10024 * Initialize reference clocks when the driver loads 10025 */ 10026 void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 10027 { 10028 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 10029 ilk_init_pch_refclk(dev_priv); 10030 else if (HAS_PCH_LPT(dev_priv)) 10031 lpt_init_pch_refclk(dev_priv); 10032 } 10033 10034 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 10035 { 10036 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 10037 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10038 enum pipe pipe = crtc->pipe; 10039 u32 val; 10040 10041 val = 0; 10042 10043 switch (crtc_state->pipe_bpp) { 10044 case 18: 10045 val |= PIPECONF_6BPC; 10046 break; 10047 case 24: 10048 val |= PIPECONF_8BPC; 10049 break; 10050 case 30: 10051 val |= PIPECONF_10BPC; 10052 break; 10053 case 36: 10054 val |= PIPECONF_12BPC; 10055 break; 10056 default: 10057 /* Case prevented by intel_choose_pipe_bpp_dither. 
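		 * (Hitting the BUG() below would mean state computation let
		 * an invalid pipe_bpp through.)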
*/ 10058 BUG(); 10059 } 10060 10061 if (crtc_state->dither) 10062 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 10063 10064 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 10065 val |= PIPECONF_INTERLACED_ILK; 10066 else 10067 val |= PIPECONF_PROGRESSIVE; 10068 10069 /* 10070 * This would end up with an odd purple hue over 10071 * the entire display. Make sure we don't do it. 10072 */ 10073 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && 10074 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 10075 10076 if (crtc_state->limited_color_range && 10077 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 10078 val |= PIPECONF_COLOR_RANGE_SELECT; 10079 10080 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 10081 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; 10082 10083 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 10084 10085 val |= PIPECONF_FRAME_START_DELAY(0); 10086 10087 intel_de_write(dev_priv, PIPECONF(pipe), val); 10088 intel_de_posting_read(dev_priv, PIPECONF(pipe)); 10089 } 10090 10091 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state) 10092 { 10093 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 10094 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10095 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 10096 u32 val = 0; 10097 10098 if (IS_HASWELL(dev_priv) && crtc_state->dither) 10099 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 10100 10101 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 10102 val |= PIPECONF_INTERLACED_ILK; 10103 else 10104 val |= PIPECONF_PROGRESSIVE; 10105 10106 if (IS_HASWELL(dev_priv) && 10107 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 10108 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; 10109 10110 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); 10111 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder)); 10112 } 10113 10114 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) 10115 { 10116 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 10117 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10118 u32 val = 0; 10119 10120 switch (crtc_state->pipe_bpp) { 10121 case 18: 10122 val |= PIPEMISC_DITHER_6_BPC; 10123 break; 10124 case 24: 10125 val |= PIPEMISC_DITHER_8_BPC; 10126 break; 10127 case 30: 10128 val |= PIPEMISC_DITHER_10_BPC; 10129 break; 10130 case 36: 10131 val |= PIPEMISC_DITHER_12_BPC; 10132 break; 10133 default: 10134 MISSING_CASE(crtc_state->pipe_bpp); 10135 break; 10136 } 10137 10138 if (crtc_state->dither) 10139 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 10140 10141 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 10142 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 10143 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; 10144 10145 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 10146 val |= PIPEMISC_YUV420_ENABLE | 10147 PIPEMISC_YUV420_MODE_FULL_BLEND; 10148 10149 if (INTEL_GEN(dev_priv) >= 11 && 10150 (crtc_state->active_planes & ~(icl_hdr_plane_mask() | 10151 BIT(PLANE_CURSOR))) == 0) 10152 val |= PIPEMISC_HDR_MODE_PRECISION; 10153 10154 if (INTEL_GEN(dev_priv) >= 12) 10155 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC; 10156 10157 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val); 10158 } 10159 10160 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) 10161 { 10162 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10163 u32 tmp; 10164 10165 tmp = intel_de_read(dev_priv, 
PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
	case PIPEMISC_DITHER_6_BPC:
		return 18;
	case PIPEMISC_DITHER_8_BPC:
		return 24;
	case PIPEMISC_DITHER_10_BPC:
		return 30;
	case PIPEMISC_DITHER_12_BPC:
		return 36;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}

int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
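	 * (That trade-off is what the INTEL_NUM_PIPES() == 3 plus
	 * analog-encoder check below encodes.)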
10262 */ 10263 if (INTEL_NUM_PIPES(dev_priv) == 3 && 10264 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 10265 dpll |= DPLL_SDVO_HIGH_SPEED; 10266 10267 /* compute bitmask from p1 value */ 10268 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 10269 /* also FPA1 */ 10270 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 10271 10272 switch (crtc_state->dpll.p2) { 10273 case 5: 10274 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 10275 break; 10276 case 7: 10277 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 10278 break; 10279 case 10: 10280 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 10281 break; 10282 case 14: 10283 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 10284 break; 10285 } 10286 10287 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 10288 intel_panel_use_ssc(dev_priv)) 10289 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 10290 else 10291 dpll |= PLL_REF_INPUT_DREFCLK; 10292 10293 dpll |= DPLL_VCO_ENABLE; 10294 10295 crtc_state->dpll_hw_state.dpll = dpll; 10296 crtc_state->dpll_hw_state.fp0 = fp; 10297 crtc_state->dpll_hw_state.fp1 = fp2; 10298 } 10299 10300 static int ilk_crtc_compute_clock(struct intel_crtc *crtc, 10301 struct intel_crtc_state *crtc_state) 10302 { 10303 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10304 struct intel_atomic_state *state = 10305 to_intel_atomic_state(crtc_state->uapi.state); 10306 const struct intel_limit *limit; 10307 int refclk = 120000; 10308 10309 memset(&crtc_state->dpll_hw_state, 0, 10310 sizeof(crtc_state->dpll_hw_state)); 10311 10312 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 10313 if (!crtc_state->has_pch_encoder) 10314 return 0; 10315 10316 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 10317 if (intel_panel_use_ssc(dev_priv)) { 10318 drm_dbg_kms(&dev_priv->drm, 10319 "using SSC reference clock of %d kHz\n", 10320 dev_priv->vbt.lvds_ssc_freq); 10321 refclk = dev_priv->vbt.lvds_ssc_freq; 10322 } 10323 10324 if (intel_is_dual_link_lvds(dev_priv)) { 10325 if (refclk == 100000) 10326 limit = &ilk_limits_dual_lvds_100m; 10327 else 10328 limit = &ilk_limits_dual_lvds; 10329 } else { 10330 if (refclk == 100000) 10331 limit = &ilk_limits_single_lvds_100m; 10332 else 10333 limit = &ilk_limits_single_lvds; 10334 } 10335 } else { 10336 limit = &ilk_limits_dac; 10337 } 10338 10339 if (!crtc_state->clock_set && 10340 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 10341 refclk, NULL, &crtc_state->dpll)) { 10342 drm_err(&dev_priv->drm, 10343 "Couldn't find PLL settings for mode!\n"); 10344 return -EINVAL; 10345 } 10346 10347 ilk_compute_dpll(crtc, crtc_state, NULL); 10348 10349 if (!intel_reserve_shared_dplls(state, crtc, NULL)) { 10350 drm_dbg_kms(&dev_priv->drm, 10351 "failed to find PLL for pipe %c\n", 10352 pipe_name(crtc->pipe)); 10353 return -EINVAL; 10354 } 10355 10356 return 0; 10357 } 10358 10359 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 10360 struct intel_link_m_n *m_n) 10361 { 10362 struct drm_device *dev = crtc->base.dev; 10363 struct drm_i915_private *dev_priv = to_i915(dev); 10364 enum pipe pipe = crtc->pipe; 10365 10366 m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe)); 10367 m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe)); 10368 m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe)) 10369 & ~TU_SIZE_MASK; 10370 m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe)); 10371 m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe)) 10372 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 
1; 10373 } 10374 10375 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 10376 enum transcoder transcoder, 10377 struct intel_link_m_n *m_n, 10378 struct intel_link_m_n *m2_n2) 10379 { 10380 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10381 enum pipe pipe = crtc->pipe; 10382 10383 if (INTEL_GEN(dev_priv) >= 5) { 10384 m_n->link_m = intel_de_read(dev_priv, 10385 PIPE_LINK_M1(transcoder)); 10386 m_n->link_n = intel_de_read(dev_priv, 10387 PIPE_LINK_N1(transcoder)); 10388 m_n->gmch_m = intel_de_read(dev_priv, 10389 PIPE_DATA_M1(transcoder)) 10390 & ~TU_SIZE_MASK; 10391 m_n->gmch_n = intel_de_read(dev_priv, 10392 PIPE_DATA_N1(transcoder)); 10393 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder)) 10394 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10395 10396 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { 10397 m2_n2->link_m = intel_de_read(dev_priv, 10398 PIPE_LINK_M2(transcoder)); 10399 m2_n2->link_n = intel_de_read(dev_priv, 10400 PIPE_LINK_N2(transcoder)); 10401 m2_n2->gmch_m = intel_de_read(dev_priv, 10402 PIPE_DATA_M2(transcoder)) 10403 & ~TU_SIZE_MASK; 10404 m2_n2->gmch_n = intel_de_read(dev_priv, 10405 PIPE_DATA_N2(transcoder)); 10406 m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder)) 10407 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10408 } 10409 } else { 10410 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe)); 10411 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe)); 10412 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe)) 10413 & ~TU_SIZE_MASK; 10414 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe)); 10415 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe)) 10416 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 10417 } 10418 } 10419 10420 void intel_dp_get_m_n(struct intel_crtc *crtc, 10421 struct intel_crtc_state *pipe_config) 10422 { 10423 if (pipe_config->has_pch_encoder) 10424 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 10425 else 10426 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 10427 &pipe_config->dp_m_n, 10428 &pipe_config->dp_m2_n2); 10429 } 10430 10431 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, 10432 struct intel_crtc_state *pipe_config) 10433 { 10434 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 10435 &pipe_config->fdi_m_n, NULL); 10436 } 10437 10438 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state, 10439 u32 pos, u32 size) 10440 { 10441 drm_rect_init(&crtc_state->pch_pfit.dst, 10442 pos >> 16, pos & 0xffff, 10443 size >> 16, size & 0xffff); 10444 } 10445 10446 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state) 10447 { 10448 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 10449 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10450 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; 10451 int id = -1; 10452 int i; 10453 10454 /* find scaler attached to this pipe */ 10455 for (i = 0; i < crtc->num_scalers; i++) { 10456 u32 ctl, pos, size; 10457 10458 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i)); 10459 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN) 10460 continue; 10461 10462 id = i; 10463 crtc_state->pch_pfit.enabled = true; 10464 10465 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i)); 10466 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i)); 10467 10468 ilk_get_pfit_pos_size(crtc_state, pos, size); 10469 10470 scaler_state->scalers[i].in_use = true; 10471 break; 10472 } 
	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}

static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));

	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = intel_de_read(dev_priv,
				      PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise; that's why the two are swapped here.
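	 * (hw ROTATE_90 is reported as DRM_MODE_ROTATE_270 below, and
	 * vice versa.)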
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));

	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}

static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiate them) so just WARN about this case for now.
10635 */ 10636 drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) && 10637 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe)); 10638 } 10639 10640 static bool ilk_get_pipe_config(struct intel_crtc *crtc, 10641 struct intel_crtc_state *pipe_config) 10642 { 10643 struct drm_device *dev = crtc->base.dev; 10644 struct drm_i915_private *dev_priv = to_i915(dev); 10645 enum intel_display_power_domain power_domain; 10646 intel_wakeref_t wakeref; 10647 u32 tmp; 10648 bool ret; 10649 10650 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10651 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 10652 if (!wakeref) 10653 return false; 10654 10655 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10656 pipe_config->shared_dpll = NULL; 10657 10658 ret = false; 10659 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); 10660 if (!(tmp & PIPECONF_ENABLE)) 10661 goto out; 10662 10663 switch (tmp & PIPECONF_BPC_MASK) { 10664 case PIPECONF_6BPC: 10665 pipe_config->pipe_bpp = 18; 10666 break; 10667 case PIPECONF_8BPC: 10668 pipe_config->pipe_bpp = 24; 10669 break; 10670 case PIPECONF_10BPC: 10671 pipe_config->pipe_bpp = 30; 10672 break; 10673 case PIPECONF_12BPC: 10674 pipe_config->pipe_bpp = 36; 10675 break; 10676 default: 10677 break; 10678 } 10679 10680 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 10681 pipe_config->limited_color_range = true; 10682 10683 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { 10684 case PIPECONF_OUTPUT_COLORSPACE_YUV601: 10685 case PIPECONF_OUTPUT_COLORSPACE_YUV709: 10686 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 10687 break; 10688 default: 10689 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 10690 break; 10691 } 10692 10693 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> 10694 PIPECONF_GAMMA_MODE_SHIFT; 10695 10696 pipe_config->csc_mode = intel_de_read(dev_priv, 10697 PIPE_CSC_MODE(crtc->pipe)); 10698 10699 i9xx_get_pipe_color_config(pipe_config); 10700 intel_color_get_config(pipe_config); 10701 10702 if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 10703 struct intel_shared_dpll *pll; 10704 enum intel_dpll_id pll_id; 10705 10706 pipe_config->has_pch_encoder = true; 10707 10708 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe)); 10709 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10710 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10711 10712 ilk_get_fdi_m_n_config(crtc, pipe_config); 10713 10714 if (HAS_PCH_IBX(dev_priv)) { 10715 /* 10716 * The pipe->pch transcoder and pch transcoder->pll 10717 * mapping is fixed. 
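			 * (i.e. pipe A always uses PCH DPLL A and pipe B
			 * uses PCH DPLL B, hence the direct cast below.)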
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ilk_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
	    INTEL_GEN(dev_priv) >= 11) {
		struct intel_encoder *encoder =
			intel_get_crtc_new_encoder(state, crtc_state);

		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
			drm_dbg_kms(&dev_priv->drm,
				    "failed to find PLL for pipe %c\n",
				    pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	return 0;
}

static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
		       ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv,
id); 10829 10830 icl_set_active_port_dpll(pipe_config, port_dpll_id); 10831 } 10832 10833 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 10834 enum port port, 10835 struct intel_crtc_state *pipe_config) 10836 { 10837 enum intel_dpll_id id; 10838 10839 switch (port) { 10840 case PORT_A: 10841 id = DPLL_ID_SKL_DPLL0; 10842 break; 10843 case PORT_B: 10844 id = DPLL_ID_SKL_DPLL1; 10845 break; 10846 case PORT_C: 10847 id = DPLL_ID_SKL_DPLL2; 10848 break; 10849 default: 10850 drm_err(&dev_priv->drm, "Incorrect port type\n"); 10851 return; 10852 } 10853 10854 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10855 } 10856 10857 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, 10858 struct intel_crtc_state *pipe_config) 10859 { 10860 enum intel_dpll_id id; 10861 u32 temp; 10862 10863 temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 10864 id = temp >> (port * 3 + 1); 10865 10866 if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3)) 10867 return; 10868 10869 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10870 } 10871 10872 static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, 10873 struct intel_crtc_state *pipe_config) 10874 { 10875 enum intel_dpll_id id; 10876 u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port)); 10877 10878 switch (ddi_pll_sel) { 10879 case PORT_CLK_SEL_WRPLL1: 10880 id = DPLL_ID_WRPLL1; 10881 break; 10882 case PORT_CLK_SEL_WRPLL2: 10883 id = DPLL_ID_WRPLL2; 10884 break; 10885 case PORT_CLK_SEL_SPLL: 10886 id = DPLL_ID_SPLL; 10887 break; 10888 case PORT_CLK_SEL_LCPLL_810: 10889 id = DPLL_ID_LCPLL_810; 10890 break; 10891 case PORT_CLK_SEL_LCPLL_1350: 10892 id = DPLL_ID_LCPLL_1350; 10893 break; 10894 case PORT_CLK_SEL_LCPLL_2700: 10895 id = DPLL_ID_LCPLL_2700; 10896 break; 10897 default: 10898 MISSING_CASE(ddi_pll_sel); 10899 fallthrough; 10900 case PORT_CLK_SEL_NONE: 10901 return; 10902 } 10903 10904 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10905 } 10906 10907 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 10908 struct intel_crtc_state *pipe_config, 10909 u64 *power_domain_mask, 10910 intel_wakeref_t *wakerefs) 10911 { 10912 struct drm_device *dev = crtc->base.dev; 10913 struct drm_i915_private *dev_priv = to_i915(dev); 10914 enum intel_display_power_domain power_domain; 10915 unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP); 10916 unsigned long enabled_panel_transcoders = 0; 10917 enum transcoder panel_transcoder; 10918 intel_wakeref_t wf; 10919 u32 tmp; 10920 10921 if (INTEL_GEN(dev_priv) >= 11) 10922 panel_transcoder_mask |= 10923 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 10924 10925 /* 10926 * The pipe->transcoder mapping is fixed with the exception of the eDP 10927 * and DSI transcoders handled below. 10928 */ 10929 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10930 10931 /* 10932 * XXX: Do intel_display_power_get_if_enabled before reading this (for 10933 * consistency and less surprising code; it's in always on power). 10934 */ 10935 for_each_cpu_transcoder_masked(dev_priv, panel_transcoder, 10936 panel_transcoder_mask) { 10937 bool force_thru = false; 10938 enum pipe trans_pipe; 10939 10940 tmp = intel_de_read(dev_priv, 10941 TRANS_DDI_FUNC_CTL(panel_transcoder)); 10942 if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 10943 continue; 10944 10945 /* 10946 * Log all enabled ones, only use the first one. 
10947 * 10948 * FIXME: This won't work for two separate DSI displays. 10949 */ 10950 enabled_panel_transcoders |= BIT(panel_transcoder); 10951 if (enabled_panel_transcoders != BIT(panel_transcoder)) 10952 continue; 10953 10954 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 10955 default: 10956 drm_WARN(dev, 1, 10957 "unknown pipe linked to transcoder %s\n", 10958 transcoder_name(panel_transcoder)); 10959 fallthrough; 10960 case TRANS_DDI_EDP_INPUT_A_ONOFF: 10961 force_thru = true; 10962 fallthrough; 10963 case TRANS_DDI_EDP_INPUT_A_ON: 10964 trans_pipe = PIPE_A; 10965 break; 10966 case TRANS_DDI_EDP_INPUT_B_ONOFF: 10967 trans_pipe = PIPE_B; 10968 break; 10969 case TRANS_DDI_EDP_INPUT_C_ONOFF: 10970 trans_pipe = PIPE_C; 10971 break; 10972 case TRANS_DDI_EDP_INPUT_D_ONOFF: 10973 trans_pipe = PIPE_D; 10974 break; 10975 } 10976 10977 if (trans_pipe == crtc->pipe) { 10978 pipe_config->cpu_transcoder = panel_transcoder; 10979 pipe_config->pch_pfit.force_thru = force_thru; 10980 } 10981 } 10982 10983 /* 10984 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 10985 */ 10986 drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && 10987 enabled_panel_transcoders != BIT(TRANSCODER_EDP)); 10988 10989 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 10990 drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain)); 10991 10992 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10993 if (!wf) 10994 return false; 10995 10996 wakerefs[power_domain] = wf; 10997 *power_domain_mask |= BIT_ULL(power_domain); 10998 10999 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder)); 11000 11001 return tmp & PIPECONF_ENABLE; 11002 } 11003 11004 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 11005 struct intel_crtc_state *pipe_config, 11006 u64 *power_domain_mask, 11007 intel_wakeref_t *wakerefs) 11008 { 11009 struct drm_device *dev = crtc->base.dev; 11010 struct drm_i915_private *dev_priv = to_i915(dev); 11011 enum intel_display_power_domain power_domain; 11012 enum transcoder cpu_transcoder; 11013 intel_wakeref_t wf; 11014 enum port port; 11015 u32 tmp; 11016 11017 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 11018 if (port == PORT_A) 11019 cpu_transcoder = TRANSCODER_DSI_A; 11020 else 11021 cpu_transcoder = TRANSCODER_DSI_C; 11022 11023 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 11024 drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain)); 11025 11026 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 11027 if (!wf) 11028 continue; 11029 11030 wakerefs[power_domain] = wf; 11031 *power_domain_mask |= BIT_ULL(power_domain); 11032 11033 /* 11034 * The PLL needs to be enabled with a valid divider 11035 * configuration, otherwise accessing DSI registers will hang 11036 * the machine. See BSpec North Display Engine 11037 * registers/MIPI[BXT]. We can break out here early, since we 11038 * need the same DSI PLL to be enabled for both DSI ports. 
11039 */ 11040 if (!bxt_dsi_pll_is_enabled(dev_priv)) 11041 break; 11042 11043 /* XXX: this works for video mode only */ 11044 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); 11045 if (!(tmp & DPI_ENABLE)) 11046 continue; 11047 11048 tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); 11049 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 11050 continue; 11051 11052 pipe_config->cpu_transcoder = cpu_transcoder; 11053 break; 11054 } 11055 11056 return transcoder_is_dsi(pipe_config->cpu_transcoder); 11057 } 11058 11059 static void hsw_get_ddi_port_state(struct intel_crtc *crtc, 11060 struct intel_crtc_state *pipe_config) 11061 { 11062 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11063 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 11064 struct intel_shared_dpll *pll; 11065 enum port port; 11066 u32 tmp; 11067 11068 if (transcoder_is_dsi(cpu_transcoder)) { 11069 port = (cpu_transcoder == TRANSCODER_DSI_A) ? 11070 PORT_A : PORT_B; 11071 } else { 11072 tmp = intel_de_read(dev_priv, 11073 TRANS_DDI_FUNC_CTL(cpu_transcoder)); 11074 if (INTEL_GEN(dev_priv) >= 12) 11075 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 11076 else 11077 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 11078 } 11079 11080 if (INTEL_GEN(dev_priv) >= 11) 11081 icl_get_ddi_pll(dev_priv, port, pipe_config); 11082 else if (IS_CANNONLAKE(dev_priv)) 11083 cnl_get_ddi_pll(dev_priv, port, pipe_config); 11084 else if (IS_GEN9_BC(dev_priv)) 11085 skl_get_ddi_pll(dev_priv, port, pipe_config); 11086 else if (IS_GEN9_LP(dev_priv)) 11087 bxt_get_ddi_pll(dev_priv, port, pipe_config); 11088 else 11089 hsw_get_ddi_pll(dev_priv, port, pipe_config); 11090 11091 pll = pipe_config->shared_dpll; 11092 if (pll) { 11093 drm_WARN_ON(&dev_priv->drm, 11094 !pll->info->funcs->get_hw_state(dev_priv, pll, 11095 &pipe_config->dpll_hw_state)); 11096 } 11097 11098 /* 11099 * Haswell has only FDI/PCH transcoder A, which is connected to 11100 * DDI E. So just check whether this pipe is wired to DDI E and whether 11101 * the PCH transcoder is on.
11102 */ 11103 if (INTEL_GEN(dev_priv) < 9 && 11104 (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) { 11105 pipe_config->has_pch_encoder = true; 11106 11107 tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 11108 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 11109 FDI_DP_PORT_WIDTH_SHIFT) + 1; 11110 11111 ilk_get_fdi_m_n_config(crtc, pipe_config); 11112 } 11113 } 11114 11115 static bool hsw_get_pipe_config(struct intel_crtc *crtc, 11116 struct intel_crtc_state *pipe_config) 11117 { 11118 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11119 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf; 11120 enum intel_display_power_domain power_domain; 11121 u64 power_domain_mask; 11122 bool active; 11123 u32 tmp; 11124 11125 pipe_config->master_transcoder = INVALID_TRANSCODER; 11126 11127 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 11128 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 11129 if (!wf) 11130 return false; 11131 11132 wakerefs[power_domain] = wf; 11133 power_domain_mask = BIT_ULL(power_domain); 11134 11135 pipe_config->shared_dpll = NULL; 11136 11137 active = hsw_get_transcoder_state(crtc, pipe_config, 11138 &power_domain_mask, wakerefs); 11139 11140 if (IS_GEN9_LP(dev_priv) && 11141 bxt_get_dsi_transcoder_state(crtc, pipe_config, 11142 &power_domain_mask, wakerefs)) { 11143 drm_WARN_ON(&dev_priv->drm, active); 11144 active = true; 11145 } 11146 11147 if (!active) 11148 goto out; 11149 11150 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 11151 INTEL_GEN(dev_priv) >= 11) { 11152 hsw_get_ddi_port_state(crtc, pipe_config); 11153 intel_get_pipe_timings(crtc, pipe_config); 11154 } 11155 11156 intel_get_pipe_src_size(crtc, pipe_config); 11157 11158 if (IS_HASWELL(dev_priv)) { 11159 u32 tmp = intel_de_read(dev_priv, 11160 PIPECONF(pipe_config->cpu_transcoder)); 11161 11162 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) 11163 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 11164 else 11165 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 11166 } else { 11167 pipe_config->output_format = 11168 bdw_get_pipemisc_output_format(crtc); 11169 11170 /* 11171 * Currently there is no interface defined to 11172 * check user preference between RGB/YCBCR444 11173 * or YCBCR420. So the only possible case for 11174 * YCBCR444 usage is driving YCBCR420 output 11175 * with LSPCON, when pipe is configured for 11176 * YCBCR444 output and LSPCON takes care of 11177 * downsampling it. 
11178 */ 11179 pipe_config->lspcon_downsampling = 11180 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444; 11181 } 11182 11183 pipe_config->gamma_mode = intel_de_read(dev_priv, 11184 GAMMA_MODE(crtc->pipe)); 11185 11186 pipe_config->csc_mode = intel_de_read(dev_priv, 11187 PIPE_CSC_MODE(crtc->pipe)); 11188 11189 if (INTEL_GEN(dev_priv) >= 9) { 11190 tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe)); 11191 11192 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) 11193 pipe_config->gamma_enable = true; 11194 11195 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) 11196 pipe_config->csc_enable = true; 11197 } else { 11198 i9xx_get_pipe_color_config(pipe_config); 11199 } 11200 11201 intel_color_get_config(pipe_config); 11202 11203 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); 11204 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); 11205 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 11206 pipe_config->ips_linetime = 11207 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); 11208 11209 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 11210 drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain)); 11211 11212 wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 11213 if (wf) { 11214 wakerefs[power_domain] = wf; 11215 power_domain_mask |= BIT_ULL(power_domain); 11216 11217 if (INTEL_GEN(dev_priv) >= 9) 11218 skl_get_pfit_config(pipe_config); 11219 else 11220 ilk_get_pfit_config(pipe_config); 11221 } 11222 11223 if (hsw_crtc_supports_ips(crtc)) { 11224 if (IS_HASWELL(dev_priv)) 11225 pipe_config->ips_enabled = intel_de_read(dev_priv, 11226 IPS_CTL) & IPS_ENABLE; 11227 else { 11228 /* 11229 * We cannot read out the IPS state on Broadwell; set it to 11230 * true so we can set it to a defined state on the first 11231 * commit.
11232 */ 11233 pipe_config->ips_enabled = true; 11234 } 11235 } 11236 11237 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 11238 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 11239 pipe_config->pixel_multiplier = 11240 intel_de_read(dev_priv, 11241 PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 11242 } else { 11243 pipe_config->pixel_multiplier = 1; 11244 } 11245 11246 out: 11247 for_each_power_domain(power_domain, power_domain_mask) 11248 intel_display_power_put(dev_priv, 11249 power_domain, wakerefs[power_domain]); 11250 11251 return active; 11252 } 11253 11254 static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 11255 { 11256 struct drm_i915_private *dev_priv = 11257 to_i915(plane_state->uapi.plane->dev); 11258 const struct drm_framebuffer *fb = plane_state->hw.fb; 11259 const struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11260 u32 base; 11261 11262 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) 11263 base = sg_dma_address(obj->mm.pages->sgl); 11264 else 11265 base = intel_plane_ggtt_offset(plane_state); 11266 11267 return base + plane_state->color_plane[0].offset; 11268 } 11269 11270 static u32 intel_cursor_position(const struct intel_plane_state *plane_state) 11271 { 11272 int x = plane_state->uapi.dst.x1; 11273 int y = plane_state->uapi.dst.y1; 11274 u32 pos = 0; 11275 11276 if (x < 0) { 11277 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 11278 x = -x; 11279 } 11280 pos |= x << CURSOR_X_SHIFT; 11281 11282 if (y < 0) { 11283 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 11284 y = -y; 11285 } 11286 pos |= y << CURSOR_Y_SHIFT; 11287 11288 return pos; 11289 } 11290 11291 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) 11292 { 11293 const struct drm_mode_config *config = 11294 &plane_state->uapi.plane->dev->mode_config; 11295 int width = drm_rect_width(&plane_state->uapi.dst); 11296 int height = drm_rect_height(&plane_state->uapi.dst); 11297 11298 return width > 0 && width <= config->cursor_width && 11299 height > 0 && height <= config->cursor_height; 11300 } 11301 11302 static int intel_cursor_check_surface(struct intel_plane_state *plane_state) 11303 { 11304 struct drm_i915_private *dev_priv = 11305 to_i915(plane_state->uapi.plane->dev); 11306 unsigned int rotation = plane_state->hw.rotation; 11307 int src_x, src_y; 11308 u32 offset; 11309 int ret; 11310 11311 ret = intel_plane_compute_gtt(plane_state); 11312 if (ret) 11313 return ret; 11314 11315 if (!plane_state->uapi.visible) 11316 return 0; 11317 11318 src_x = plane_state->uapi.src.x1 >> 16; 11319 src_y = plane_state->uapi.src.y1 >> 16; 11320 11321 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 11322 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 11323 plane_state, 0); 11324 11325 if (src_x != 0 || src_y != 0) { 11326 drm_dbg_kms(&dev_priv->drm, 11327 "Arbitrary cursor panning not supported\n"); 11328 return -EINVAL; 11329 } 11330 11331 /* 11332 * Put the final coordinates back so that the src 11333 * coordinate checks will see the right values. 
11334 */ 11335 drm_rect_translate_to(&plane_state->uapi.src, 11336 src_x << 16, src_y << 16); 11337 11338 /* ILK+ do this automagically in hardware */ 11339 if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { 11340 const struct drm_framebuffer *fb = plane_state->hw.fb; 11341 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 11342 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 11343 11344 offset += (src_h * src_w - 1) * fb->format->cpp[0]; 11345 } 11346 11347 plane_state->color_plane[0].offset = offset; 11348 plane_state->color_plane[0].x = src_x; 11349 plane_state->color_plane[0].y = src_y; 11350 11351 return 0; 11352 } 11353 11354 static int intel_check_cursor(struct intel_crtc_state *crtc_state, 11355 struct intel_plane_state *plane_state) 11356 { 11357 const struct drm_framebuffer *fb = plane_state->hw.fb; 11358 struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 11359 int ret; 11360 11361 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { 11362 drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n"); 11363 return -EINVAL; 11364 } 11365 11366 ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, 11367 &crtc_state->uapi, 11368 DRM_PLANE_HELPER_NO_SCALING, 11369 DRM_PLANE_HELPER_NO_SCALING, 11370 true, true); 11371 if (ret) 11372 return ret; 11373 11374 /* Use the unclipped src/dst rectangles, which we program to hw */ 11375 plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi); 11376 plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi); 11377 11378 ret = intel_cursor_check_surface(plane_state); 11379 if (ret) 11380 return ret; 11381 11382 if (!plane_state->uapi.visible) 11383 return 0; 11384 11385 ret = intel_plane_check_src_coordinates(plane_state); 11386 if (ret) 11387 return ret; 11388 11389 return 0; 11390 } 11391 11392 static unsigned int 11393 i845_cursor_max_stride(struct intel_plane *plane, 11394 u32 pixel_format, u64 modifier, 11395 unsigned int rotation) 11396 { 11397 return 2048; 11398 } 11399 11400 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11401 { 11402 u32 cntl = 0; 11403 11404 if (crtc_state->gamma_enable) 11405 cntl |= CURSOR_GAMMA_ENABLE; 11406 11407 return cntl; 11408 } 11409 11410 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 11411 const struct intel_plane_state *plane_state) 11412 { 11413 return CURSOR_ENABLE | 11414 CURSOR_FORMAT_ARGB | 11415 CURSOR_STRIDE(plane_state->color_plane[0].stride); 11416 } 11417 11418 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 11419 { 11420 int width = drm_rect_width(&plane_state->uapi.dst); 11421 11422 /* 11423 * 845g/865g are only limited by the width of their cursors, 11424 * the height is arbitrary up to the precision of the register. 
11425 */ 11426 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 11427 } 11428 11429 static int i845_check_cursor(struct intel_crtc_state *crtc_state, 11430 struct intel_plane_state *plane_state) 11431 { 11432 const struct drm_framebuffer *fb = plane_state->hw.fb; 11433 struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 11434 int ret; 11435 11436 ret = intel_check_cursor(crtc_state, plane_state); 11437 if (ret) 11438 return ret; 11439 11440 /* if we want to turn off the cursor, ignore width and height */ 11441 if (!fb) 11442 return 0; 11443 11444 /* Check for which cursor types we support */ 11445 if (!i845_cursor_size_ok(plane_state)) { 11446 drm_dbg_kms(&i915->drm, 11447 "Cursor dimension %dx%d not supported\n", 11448 drm_rect_width(&plane_state->uapi.dst), 11449 drm_rect_height(&plane_state->uapi.dst)); 11450 return -EINVAL; 11451 } 11452 11453 drm_WARN_ON(&i915->drm, plane_state->uapi.visible && 11454 plane_state->color_plane[0].stride != fb->pitches[0]); 11455 11456 switch (fb->pitches[0]) { 11457 case 256: 11458 case 512: 11459 case 1024: 11460 case 2048: 11461 break; 11462 default: 11463 drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n", 11464 fb->pitches[0]); 11465 return -EINVAL; 11466 } 11467 11468 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 11469 11470 return 0; 11471 } 11472
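/*
 * A standalone sketch (not driver code) of the two i845 cursor rules
 * above: only four fixed strides are legal, and i845_update_cursor()
 * below packs the size as (height << 12) | width. The example_* helper
 * names are made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool example_i845_stride_ok(unsigned int pitch)
{
	switch (pitch) {
	case 256: case 512: case 1024: case 2048:
		return true;
	default:
		return false;
	}
}

static uint32_t example_i845_cursize(unsigned int width, unsigned int height)
{
	return (height << 12) | width;	/* height above bit 12, width below */
}

int main(void)
{
	printf("stride 512 ok: %d\n", example_i845_stride_ok(512));	/* 1 */
	printf("stride 640 ok: %d\n", example_i845_stride_ok(640));	/* 0 */
	printf("CURSIZE(64, 64) = 0x%05x\n", example_i845_cursize(64, 64)); /* 0x40040 */
	return 0;
}

11473 static void i845_update_cursor(struct intel_plane *plane, 11474 const struct intel_crtc_state *crtc_state, 11475 const struct intel_plane_state *plane_state) 11476 { 11477 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11478 u32 cntl = 0, base = 0, pos = 0, size = 0; 11479 unsigned long irqflags; 11480 11481 if (plane_state && plane_state->uapi.visible) { 11482 unsigned int width = drm_rect_width(&plane_state->uapi.dst); 11483 unsigned int height = drm_rect_height(&plane_state->uapi.dst); 11484 11485 cntl = plane_state->ctl | 11486 i845_cursor_ctl_crtc(crtc_state); 11487 11488 size = (height << 12) | width; 11489 11490 base = intel_cursor_base(plane_state); 11491 pos = intel_cursor_position(plane_state); 11492 } 11493 11494 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11495 11496 /* On these chipsets we can only modify the base/size/stride 11497 * whilst the cursor is disabled.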
11498 */ 11499 if (plane->cursor.base != base || 11500 plane->cursor.size != size || 11501 plane->cursor.cntl != cntl) { 11502 intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0); 11503 intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base); 11504 intel_de_write_fw(dev_priv, CURSIZE, size); 11505 intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); 11506 intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl); 11507 11508 plane->cursor.base = base; 11509 plane->cursor.size = size; 11510 plane->cursor.cntl = cntl; 11511 } else { 11512 intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); 11513 } 11514 11515 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11516 } 11517 11518 static void i845_disable_cursor(struct intel_plane *plane, 11519 const struct intel_crtc_state *crtc_state) 11520 { 11521 i845_update_cursor(plane, crtc_state, NULL); 11522 } 11523 11524 static bool i845_cursor_get_hw_state(struct intel_plane *plane, 11525 enum pipe *pipe) 11526 { 11527 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11528 enum intel_display_power_domain power_domain; 11529 intel_wakeref_t wakeref; 11530 bool ret; 11531 11532 power_domain = POWER_DOMAIN_PIPE(PIPE_A); 11533 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11534 if (!wakeref) 11535 return false; 11536 11537 ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE; 11538 11539 *pipe = PIPE_A; 11540 11541 intel_display_power_put(dev_priv, power_domain, wakeref); 11542 11543 return ret; 11544 } 11545 11546 static unsigned int 11547 i9xx_cursor_max_stride(struct intel_plane *plane, 11548 u32 pixel_format, u64 modifier, 11549 unsigned int rotation) 11550 { 11551 return plane->base.dev->mode_config.cursor_width * 4; 11552 } 11553 11554 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11555 { 11556 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 11557 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11558 u32 cntl = 0; 11559 11560 if (INTEL_GEN(dev_priv) >= 11) 11561 return cntl; 11562 11563 if (crtc_state->gamma_enable) 11564 cntl = MCURSOR_GAMMA_ENABLE; 11565 11566 if (crtc_state->csc_enable) 11567 cntl |= MCURSOR_PIPE_CSC_ENABLE; 11568 11569 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 11570 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 11571 11572 return cntl; 11573 } 11574 11575 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 11576 const struct intel_plane_state *plane_state) 11577 { 11578 struct drm_i915_private *dev_priv = 11579 to_i915(plane_state->uapi.plane->dev); 11580 u32 cntl = 0; 11581 11582 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 11583 cntl |= MCURSOR_TRICKLE_FEED_DISABLE; 11584 11585 switch (drm_rect_width(&plane_state->uapi.dst)) { 11586 case 64: 11587 cntl |= MCURSOR_MODE_64_ARGB_AX; 11588 break; 11589 case 128: 11590 cntl |= MCURSOR_MODE_128_ARGB_AX; 11591 break; 11592 case 256: 11593 cntl |= MCURSOR_MODE_256_ARGB_AX; 11594 break; 11595 default: 11596 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst)); 11597 return 0; 11598 } 11599 11600 if (plane_state->hw.rotation & DRM_MODE_ROTATE_180) 11601 cntl |= MCURSOR_ROTATE_180; 11602 11603 return cntl; 11604 } 11605 11606 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) 11607 { 11608 struct drm_i915_private *dev_priv = 11609 to_i915(plane_state->uapi.plane->dev); 11610 int width = drm_rect_width(&plane_state->uapi.dst); 11611 int height = drm_rect_height(&plane_state->uapi.dst); 11612 11613 if (!intel_cursor_size_ok(plane_state)) 11614 
return false; 11615 11616 /* Cursor width is limited to a few power-of-two sizes */ 11617 switch (width) { 11618 case 256: 11619 case 128: 11620 case 64: 11621 break; 11622 default: 11623 return false; 11624 } 11625 11626 /* 11627 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor 11628 * height from 8 lines up to the cursor width, when the 11629 * cursor is not rotated. Everything else requires square 11630 * cursors. 11631 */ 11632 if (HAS_CUR_FBC(dev_priv) && 11633 plane_state->hw.rotation & DRM_MODE_ROTATE_0) { 11634 if (height < 8 || height > width) 11635 return false; 11636 } else { 11637 if (height != width) 11638 return false; 11639 } 11640 11641 return true; 11642 } 11643 11644 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, 11645 struct intel_plane_state *plane_state) 11646 { 11647 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 11648 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11649 const struct drm_framebuffer *fb = plane_state->hw.fb; 11650 enum pipe pipe = plane->pipe; 11651 int ret; 11652 11653 ret = intel_check_cursor(crtc_state, plane_state); 11654 if (ret) 11655 return ret; 11656 11657 /* if we want to turn off the cursor, ignore width and height */ 11658 if (!fb) 11659 return 0; 11660 11661 /* Check for which cursor types we support */ 11662 if (!i9xx_cursor_size_ok(plane_state)) { 11663 drm_dbg(&dev_priv->drm, 11664 "Cursor dimension %dx%d not supported\n", 11665 drm_rect_width(&plane_state->uapi.dst), 11666 drm_rect_height(&plane_state->uapi.dst)); 11667 return -EINVAL; 11668 } 11669 11670 drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible && 11671 plane_state->color_plane[0].stride != fb->pitches[0]); 11672 11673 if (fb->pitches[0] != 11674 drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { 11675 drm_dbg_kms(&dev_priv->drm, 11676 "Invalid cursor stride (%u) (cursor width %d)\n", 11677 fb->pitches[0], 11678 drm_rect_width(&plane_state->uapi.dst)); 11679 return -EINVAL; 11680 } 11681 11682 /* 11683 * There's something wrong with the cursor on CHV pipe C. 11684 * If it straddles the left edge of the screen then 11685 * moving it away from the edge or disabling it often 11686 * results in a pipe underrun, and often that can lead to 11687 * a dead pipe (constant underrun reported, and it scans 11688 * out just a solid color). To recover from that, the 11689 * display power well must be turned off and on again. 11690 * Refuse to put the cursor into that compromised position.
11691 */ 11692 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && 11693 plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) { 11694 drm_dbg_kms(&dev_priv->drm, 11695 "CHV cursor C not allowed to straddle the left screen edge\n"); 11696 return -EINVAL; 11697 } 11698 11699 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); 11700 11701 return 0; 11702 } 11703 11704 static void i9xx_update_cursor(struct intel_plane *plane, 11705 const struct intel_crtc_state *crtc_state, 11706 const struct intel_plane_state *plane_state) 11707 { 11708 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11709 enum pipe pipe = plane->pipe; 11710 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; 11711 unsigned long irqflags; 11712 11713 if (plane_state && plane_state->uapi.visible) { 11714 unsigned width = drm_rect_width(&plane_state->uapi.dst); 11715 unsigned height = drm_rect_height(&plane_state->uapi.dst); 11716 11717 cntl = plane_state->ctl | 11718 i9xx_cursor_ctl_crtc(crtc_state); 11719 11720 if (width != height) 11721 fbc_ctl = CUR_FBC_CTL_EN | (height - 1); 11722 11723 base = intel_cursor_base(plane_state); 11724 pos = intel_cursor_position(plane_state); 11725 } 11726 11727 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11728 11729 /* 11730 * On some platforms writing CURCNTR first will also 11731 * cause CURPOS to be armed by the CURBASE write. 11732 * Without the CURCNTR write the CURPOS write would 11733 * arm itself. Thus we always update CURCNTR before 11734 * CURPOS. 11735 * 11736 * On other platforms CURPOS always requires the 11737 * CURBASE write to arm the update. Additionally 11738 * a write to any of the cursor registers will cancel 11739 * an already armed cursor update. Thus leaving out 11740 * the CURBASE write after CURPOS could lead to a 11741 * cursor that doesn't appear to move, or even change 11742 * shape. Thus we always write CURBASE. 11743 * 11744 * The other registers are armed by the CURBASE write 11745 * except when the plane is getting enabled at which time 11746 * the CURCNTR write arms the update. 11747 */ 11748 11749 if (INTEL_GEN(dev_priv) >= 9) 11750 skl_write_cursor_wm(plane, crtc_state); 11751 11752 if (plane->cursor.base != base || 11753 plane->cursor.size != fbc_ctl || 11754 plane->cursor.cntl != cntl) { 11755 if (HAS_CUR_FBC(dev_priv)) 11756 intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe), 11757 fbc_ctl); 11758 intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl); 11759 intel_de_write_fw(dev_priv, CURPOS(pipe), pos); 11760 intel_de_write_fw(dev_priv, CURBASE(pipe), base); 11761 11762 plane->cursor.base = base; 11763 plane->cursor.size = fbc_ctl; 11764 plane->cursor.cntl = cntl; 11765 } else { 11766 intel_de_write_fw(dev_priv, CURPOS(pipe), pos); 11767 intel_de_write_fw(dev_priv, CURBASE(pipe), base); 11768 } 11769 11770 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11771 } 11772
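/*
 * A standalone sketch (not driver code) of the sign-magnitude CURPOS
 * packing done by intel_cursor_position() further up: each 16-bit half
 * of the register holds |coordinate| plus a sign bit. The EX_* shift
 * and sign values are illustrative assumptions, not the real register
 * layout.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_CURSOR_X_SHIFT	0
#define EX_CURSOR_Y_SHIFT	16
#define EX_CURSOR_POS_SIGN	(1u << 15)	/* sign bit within each field */

static uint32_t example_cursor_pos(int x, int y)
{
	uint32_t pos = 0;

	if (x < 0) {
		pos |= EX_CURSOR_POS_SIGN << EX_CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= (uint32_t)x << EX_CURSOR_X_SHIFT;

	if (y < 0) {
		pos |= EX_CURSOR_POS_SIGN << EX_CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= (uint32_t)y << EX_CURSOR_Y_SHIFT;

	return pos;
}

int main(void)
{
	/* x = -12, y = 34 packs to 0x0022800c with these example shifts */
	printf("0x%08x\n", example_cursor_pos(-12, 34));
	return 0;
}

11773 static void i9xx_disable_cursor(struct intel_plane *plane, 11774 const struct intel_crtc_state *crtc_state) 11775 { 11776 i9xx_update_cursor(plane, crtc_state, NULL); 11777 } 11778 11779 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, 11780 enum pipe *pipe) 11781 { 11782 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11783 enum intel_display_power_domain power_domain; 11784 intel_wakeref_t wakeref; 11785 bool ret; 11786 u32 val; 11787 11788 /* 11789 * Not 100% correct for planes that can move between pipes, 11790 * but that's only the case for gen2-3 which don't have any 11791 * display power wells.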
11792 */ 11793 power_domain = POWER_DOMAIN_PIPE(plane->pipe); 11794 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11795 if (!wakeref) 11796 return false; 11797 11798 val = intel_de_read(dev_priv, CURCNTR(plane->pipe)); 11799 11800 ret = val & MCURSOR_MODE; 11801 11802 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 11803 *pipe = plane->pipe; 11804 else 11805 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> 11806 MCURSOR_PIPE_SELECT_SHIFT; 11807 11808 intel_display_power_put(dev_priv, power_domain, wakeref); 11809 11810 return ret; 11811 } 11812 11813 /* VESA 640x480x72Hz mode to set on the pipe */ 11814 static const struct drm_display_mode load_detect_mode = { 11815 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 11816 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 11817 }; 11818 11819 struct drm_framebuffer * 11820 intel_framebuffer_create(struct drm_i915_gem_object *obj, 11821 struct drm_mode_fb_cmd2 *mode_cmd) 11822 { 11823 struct intel_framebuffer *intel_fb; 11824 int ret; 11825 11826 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 11827 if (!intel_fb) 11828 return ERR_PTR(-ENOMEM); 11829 11830 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); 11831 if (ret) 11832 goto err; 11833 11834 return &intel_fb->base; 11835 11836 err: 11837 kfree(intel_fb); 11838 return ERR_PTR(ret); 11839 } 11840 11841 static int intel_modeset_disable_planes(struct drm_atomic_state *state, 11842 struct drm_crtc *crtc) 11843 { 11844 struct drm_plane *plane; 11845 struct drm_plane_state *plane_state; 11846 int ret, i; 11847 11848 ret = drm_atomic_add_affected_planes(state, crtc); 11849 if (ret) 11850 return ret; 11851 11852 for_each_new_plane_in_state(state, plane, plane_state, i) { 11853 if (plane_state->crtc != crtc) 11854 continue; 11855 11856 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 11857 if (ret) 11858 return ret; 11859 11860 drm_atomic_set_fb_for_plane(plane_state, NULL); 11861 } 11862 11863 return 0; 11864 } 11865 11866 int intel_get_load_detect_pipe(struct drm_connector *connector, 11867 struct intel_load_detect_pipe *old, 11868 struct drm_modeset_acquire_ctx *ctx) 11869 { 11870 struct intel_crtc *intel_crtc; 11871 struct intel_encoder *intel_encoder = 11872 intel_attached_encoder(to_intel_connector(connector)); 11873 struct drm_crtc *possible_crtc; 11874 struct drm_encoder *encoder = &intel_encoder->base; 11875 struct drm_crtc *crtc = NULL; 11876 struct drm_device *dev = encoder->dev; 11877 struct drm_i915_private *dev_priv = to_i915(dev); 11878 struct drm_mode_config *config = &dev->mode_config; 11879 struct drm_atomic_state *state = NULL, *restore_state = NULL; 11880 struct drm_connector_state *connector_state; 11881 struct intel_crtc_state *crtc_state; 11882 int ret, i = -1; 11883 11884 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11885 connector->base.id, connector->name, 11886 encoder->base.id, encoder->name); 11887 11888 old->restore_state = NULL; 11889 11890 drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex)); 11891 11892 /* 11893 * Algorithm gets a little messy: 11894 * 11895 * - if the connector already has an assigned crtc, use it (but make 11896 * sure it's on first) 11897 * 11898 * - try to find the first unused crtc that can drive this connector, 11899 * and use that if we find one 11900 */ 11901 11902 /* See if we already have a CRTC for this connector */ 11903 if (connector->state->crtc) { 11904 crtc = connector->state->crtc; 11905 11906 ret = 
drm_modeset_lock(&crtc->mutex, ctx); 11907 if (ret) 11908 goto fail; 11909 11910 /* Make sure the crtc and connector are running */ 11911 goto found; 11912 } 11913 11914 /* Find an unused one (if possible) */ 11915 for_each_crtc(dev, possible_crtc) { 11916 i++; 11917 if (!(encoder->possible_crtcs & (1 << i))) 11918 continue; 11919 11920 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 11921 if (ret) 11922 goto fail; 11923 11924 if (possible_crtc->state->enable) { 11925 drm_modeset_unlock(&possible_crtc->mutex); 11926 continue; 11927 } 11928 11929 crtc = possible_crtc; 11930 break; 11931 } 11932 11933 /* 11934 * If we didn't find an unused CRTC, don't use any. 11935 */ 11936 if (!crtc) { 11937 drm_dbg_kms(&dev_priv->drm, 11938 "no pipe available for load-detect\n"); 11939 ret = -ENODEV; 11940 goto fail; 11941 } 11942 11943 found: 11944 intel_crtc = to_intel_crtc(crtc); 11945 11946 state = drm_atomic_state_alloc(dev); 11947 restore_state = drm_atomic_state_alloc(dev); 11948 if (!state || !restore_state) { 11949 ret = -ENOMEM; 11950 goto fail; 11951 } 11952 11953 state->acquire_ctx = ctx; 11954 restore_state->acquire_ctx = ctx; 11955 11956 connector_state = drm_atomic_get_connector_state(state, connector); 11957 if (IS_ERR(connector_state)) { 11958 ret = PTR_ERR(connector_state); 11959 goto fail; 11960 } 11961 11962 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 11963 if (ret) 11964 goto fail; 11965 11966 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 11967 if (IS_ERR(crtc_state)) { 11968 ret = PTR_ERR(crtc_state); 11969 goto fail; 11970 } 11971 11972 crtc_state->uapi.active = true; 11973 11974 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi, 11975 &load_detect_mode); 11976 if (ret) 11977 goto fail; 11978 11979 ret = intel_modeset_disable_planes(state, crtc); 11980 if (ret) 11981 goto fail; 11982 11983 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 11984 if (!ret) 11985 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 11986 if (!ret) 11987 ret = drm_atomic_add_affected_planes(restore_state, crtc); 11988 if (ret) { 11989 drm_dbg_kms(&dev_priv->drm, 11990 "Failed to create a copy of old state to restore: %i\n", 11991 ret); 11992 goto fail; 11993 } 11994 11995 ret = drm_atomic_commit(state); 11996 if (ret) { 11997 drm_dbg_kms(&dev_priv->drm, 11998 "failed to set mode on load-detect pipe\n"); 11999 goto fail; 12000 } 12001 12002 old->restore_state = restore_state; 12003 drm_atomic_state_put(state); 12004 12005 /* let the connector get through one full cycle before testing */ 12006 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 12007 return true; 12008 12009 fail: 12010 if (state) { 12011 drm_atomic_state_put(state); 12012 state = NULL; 12013 } 12014 if (restore_state) { 12015 drm_atomic_state_put(restore_state); 12016 restore_state = NULL; 12017 } 12018 12019 if (ret == -EDEADLK) 12020 return ret; 12021 12022 return false; 12023 } 12024 12025 void intel_release_load_detect_pipe(struct drm_connector *connector, 12026 struct intel_load_detect_pipe *old, 12027 struct drm_modeset_acquire_ctx *ctx) 12028 { 12029 struct intel_encoder *intel_encoder = 12030 intel_attached_encoder(to_intel_connector(connector)); 12031 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev); 12032 struct drm_encoder *encoder = &intel_encoder->base; 12033 struct drm_atomic_state *state = old->restore_state; 12034 int ret; 12035 12036 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 12037 
connector->base.id, connector->name, 12038 encoder->base.id, encoder->name); 12039 12040 if (!state) 12041 return; 12042 12043 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 12044 if (ret) 12045 drm_dbg_kms(&i915->drm, 12046 "Couldn't release load detect pipe: %i\n", ret); 12047 drm_atomic_state_put(state); 12048 } 12049 12050 static int i9xx_pll_refclk(struct drm_device *dev, 12051 const struct intel_crtc_state *pipe_config) 12052 { 12053 struct drm_i915_private *dev_priv = to_i915(dev); 12054 u32 dpll = pipe_config->dpll_hw_state.dpll; 12055 12056 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 12057 return dev_priv->vbt.lvds_ssc_freq; 12058 else if (HAS_PCH_SPLIT(dev_priv)) 12059 return 120000; 12060 else if (!IS_GEN(dev_priv, 2)) 12061 return 96000; 12062 else 12063 return 48000; 12064 } 12065 12066 /* Returns the clock of the currently programmed mode of the given pipe. */ 12067 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 12068 struct intel_crtc_state *pipe_config) 12069 { 12070 struct drm_device *dev = crtc->base.dev; 12071 struct drm_i915_private *dev_priv = to_i915(dev); 12072 enum pipe pipe = crtc->pipe; 12073 u32 dpll = pipe_config->dpll_hw_state.dpll; 12074 u32 fp; 12075 struct dpll clock; 12076 int port_clock; 12077 int refclk = i9xx_pll_refclk(dev, pipe_config); 12078 12079 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 12080 fp = pipe_config->dpll_hw_state.fp0; 12081 else 12082 fp = pipe_config->dpll_hw_state.fp1; 12083 12084 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 12085 if (IS_PINEVIEW(dev_priv)) { 12086 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 12087 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 12088 } else { 12089 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 12090 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 12091 } 12092 12093 if (!IS_GEN(dev_priv, 2)) { 12094 if (IS_PINEVIEW(dev_priv)) 12095 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 12096 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 12097 else 12098 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 12099 DPLL_FPA01_P1_POST_DIV_SHIFT); 12100 12101 switch (dpll & DPLL_MODE_MASK) { 12102 case DPLLB_MODE_DAC_SERIAL: 12103 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 12104 5 : 10; 12105 break; 12106 case DPLLB_MODE_LVDS: 12107 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 12108 7 : 14; 12109 break; 12110 default: 12111 drm_dbg_kms(&dev_priv->drm, 12112 "Unknown DPLL mode %08x in programmed " 12113 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 12114 return; 12115 } 12116 12117 if (IS_PINEVIEW(dev_priv)) 12118 port_clock = pnv_calc_dpll_params(refclk, &clock); 12119 else 12120 port_clock = i9xx_calc_dpll_params(refclk, &clock); 12121 } else { 12122 u32 lvds = IS_I830(dev_priv) ? 
0 : intel_de_read(dev_priv, 12123 LVDS); 12124 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 12125 12126 if (is_lvds) { 12127 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 12128 DPLL_FPA01_P1_POST_DIV_SHIFT); 12129 12130 if (lvds & LVDS_CLKB_POWER_UP) 12131 clock.p2 = 7; 12132 else 12133 clock.p2 = 14; 12134 } else { 12135 if (dpll & PLL_P1_DIVIDE_BY_TWO) 12136 clock.p1 = 2; 12137 else { 12138 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 12139 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 12140 } 12141 if (dpll & PLL_P2_DIVIDE_BY_4) 12142 clock.p2 = 4; 12143 else 12144 clock.p2 = 2; 12145 } 12146 12147 port_clock = i9xx_calc_dpll_params(refclk, &clock); 12148 } 12149 12150 /* 12151 * This value includes pixel_multiplier. We will use 12152 * port_clock to compute adjusted_mode.crtc_clock in the 12153 * encoder's get_config() function. 12154 */ 12155 pipe_config->port_clock = port_clock; 12156 } 12157 12158 int intel_dotclock_calculate(int link_freq, 12159 const struct intel_link_m_n *m_n) 12160 { 12161 /* 12162 * The calculation for the data clock is: 12163 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 12164 * But we want to avoid losing precision if possible, so: 12165 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 12166 * 12167 * and the link clock is simpler: 12168 * link_clock = (m * link_clock) / n 12169 */ 12170 12171 if (!m_n->link_n) 12172 return 0; 12173 12174 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); 12175 } 12176 12177 static void ilk_pch_clock_get(struct intel_crtc *crtc, 12178 struct intel_crtc_state *pipe_config) 12179 { 12180 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12181 12182 /* read out port_clock from the DPLL */ 12183 i9xx_crtc_clock_get(crtc, pipe_config); 12184 12185 /* 12186 * In case there is an active pipe without active ports, 12187 * we may need some idea for the dotclock anyway. 12188 * Calculate one based on the FDI configuration. 12189 */ 12190 pipe_config->hw.adjusted_mode.crtc_clock = 12191 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 12192 &pipe_config->fdi_m_n); 12193 } 12194 12195 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, 12196 struct intel_crtc *crtc) 12197 { 12198 memset(crtc_state, 0, sizeof(*crtc_state)); 12199 12200 __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base); 12201 12202 crtc_state->cpu_transcoder = INVALID_TRANSCODER; 12203 crtc_state->master_transcoder = INVALID_TRANSCODER; 12204 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 12205 crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID; 12206 crtc_state->scaler_state.scaler_id = -1; 12207 crtc_state->mst_master_transcoder = INVALID_TRANSCODER; 12208 } 12209 12210 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc) 12211 { 12212 struct intel_crtc_state *crtc_state; 12213 12214 crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL); 12215 12216 if (crtc_state) 12217 intel_crtc_state_reset(crtc_state, crtc); 12218 12219 return crtc_state; 12220 } 12221
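/*
 * A standalone sketch (not driver code) of the M/N math in
 * intel_dotclock_calculate() above, with made-up numbers: link_m/link_n
 * scale the link clock into a pixel clock, using a 64-bit product so no
 * precision is lost before the divide.
 */
#include <stdint.h>
#include <stdio.h>

static int example_dotclock(int link_freq, uint32_t link_m, uint32_t link_n)
{
	if (!link_n)
		return 0;

	/* mirror div_u64(mul_u32_u32(...)): widen before dividing */
	return (int)(((uint64_t)link_m * link_freq) / link_n);
}

int main(void)
{
	/* 270000 kHz link with m/n = 22222/65536 -> 91551 kHz dotclock */
	printf("%d kHz\n", example_dotclock(270000, 22222, 65536));
	return 0;
}

12222 /* Returns the currently programmed mode of the given encoder.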
*/ 12223 struct drm_display_mode * 12224 intel_encoder_current_mode(struct intel_encoder *encoder) 12225 { 12226 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 12227 struct intel_crtc_state *crtc_state; 12228 struct drm_display_mode *mode; 12229 struct intel_crtc *crtc; 12230 enum pipe pipe; 12231 12232 if (!encoder->get_hw_state(encoder, &pipe)) 12233 return NULL; 12234 12235 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 12236 12237 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 12238 if (!mode) 12239 return NULL; 12240 12241 crtc_state = intel_crtc_state_alloc(crtc); 12242 if (!crtc_state) { 12243 kfree(mode); 12244 return NULL; 12245 } 12246 12247 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) { 12248 kfree(crtc_state); 12249 kfree(mode); 12250 return NULL; 12251 } 12252 12253 encoder->get_config(encoder, crtc_state); 12254 12255 intel_mode_from_pipe_config(mode, crtc_state); 12256 12257 kfree(crtc_state); 12258 12259 return mode; 12260 } 12261 12262 static void intel_crtc_destroy(struct drm_crtc *crtc) 12263 { 12264 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12265 12266 drm_crtc_cleanup(crtc); 12267 kfree(intel_crtc); 12268 } 12269 12270 /** 12271 * intel_wm_need_update - Check whether watermarks need updating 12272 * @cur: current plane state 12273 * @new: new plane state 12274 * 12275 * Check current plane state versus the new one to determine whether 12276 * watermarks need to be recalculated. 12277 * 12278 * Returns true or false. 12279 */ 12280 static bool intel_wm_need_update(const struct intel_plane_state *cur, 12281 struct intel_plane_state *new) 12282 { 12283 /* Update watermarks on tiling or size changes. */ 12284 if (new->uapi.visible != cur->uapi.visible) 12285 return true; 12286 12287 if (!cur->hw.fb || !new->hw.fb) 12288 return false; 12289 12290 if (cur->hw.fb->modifier != new->hw.fb->modifier || 12291 cur->hw.rotation != new->hw.rotation || 12292 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) || 12293 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) || 12294 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) || 12295 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst)) 12296 return true; 12297 12298 return false; 12299 } 12300 12301 static bool needs_scaling(const struct intel_plane_state *state) 12302 { 12303 int src_w = drm_rect_width(&state->uapi.src) >> 16; 12304 int src_h = drm_rect_height(&state->uapi.src) >> 16; 12305 int dst_w = drm_rect_width(&state->uapi.dst); 12306 int dst_h = drm_rect_height(&state->uapi.dst); 12307 12308 return (src_w != dst_w || src_h != dst_h); 12309 } 12310 12311 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, 12312 struct intel_crtc_state *crtc_state, 12313 const struct intel_plane_state *old_plane_state, 12314 struct intel_plane_state *plane_state) 12315 { 12316 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 12317 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 12318 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12319 bool mode_changed = needs_modeset(crtc_state); 12320 bool was_crtc_enabled = old_crtc_state->hw.active; 12321 bool is_crtc_enabled = crtc_state->hw.active; 12322 bool turn_off, turn_on, visible, was_visible; 12323 int ret; 12324 12325 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { 12326 ret = skl_update_scaler_plane(crtc_state, plane_state); 12327 if (ret) 12328 return ret; 12329 } 12330 12331 was_visible = 
old_plane_state->uapi.visible; 12332 visible = plane_state->uapi.visible; 12333 12334 if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible)) 12335 was_visible = false; 12336 12337 /* 12338 * Visibility is calculated as if the crtc was on, but 12339 * after scaler setup everything depends on it being off 12340 * when the crtc isn't active. 12341 * 12342 * FIXME this is wrong for watermarks. Watermarks should also 12343 * be computed as if the pipe would be active. Perhaps move 12344 * per-plane wm computation to the .check_plane() hook, and 12345 * only combine the results from all planes in the current place? 12346 */ 12347 if (!is_crtc_enabled) { 12348 intel_plane_set_invisible(crtc_state, plane_state); 12349 visible = false; 12350 } 12351 12352 if (!was_visible && !visible) 12353 return 0; 12354 12355 turn_off = was_visible && (!visible || mode_changed); 12356 turn_on = visible && (!was_visible || mode_changed); 12357 12358 drm_dbg_atomic(&dev_priv->drm, 12359 "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", 12360 crtc->base.base.id, crtc->base.name, 12361 plane->base.base.id, plane->base.name, 12362 was_visible, visible, 12363 turn_off, turn_on, mode_changed); 12364 12365 if (turn_on) { 12366 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 12367 crtc_state->update_wm_pre = true; 12368 12369 /* must disable cxsr around plane enable/disable */ 12370 if (plane->id != PLANE_CURSOR) 12371 crtc_state->disable_cxsr = true; 12372 } else if (turn_off) { 12373 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 12374 crtc_state->update_wm_post = true; 12375 12376 /* must disable cxsr around plane enable/disable */ 12377 if (plane->id != PLANE_CURSOR) 12378 crtc_state->disable_cxsr = true; 12379 } else if (intel_wm_need_update(old_plane_state, plane_state)) { 12380 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { 12381 /* FIXME bollocks */ 12382 crtc_state->update_wm_pre = true; 12383 crtc_state->update_wm_post = true; 12384 } 12385 } 12386 12387 if (visible || was_visible) 12388 crtc_state->fb_bits |= plane->frontbuffer_bit; 12389 12390 /* 12391 * ILK/SNB DVSACNTR/Sprite Enable 12392 * IVB SPR_CTL/Sprite Enable 12393 * "When in Self Refresh Big FIFO mode, a write to enable the 12394 * plane will be internally buffered and delayed while Big FIFO 12395 * mode is exiting." 12396 * 12397 * Which means that enabling the sprite can take an extra frame 12398 * when we start in big FIFO mode (LP1+). Thus we need to drop 12399 * down to LP0 and wait for vblank in order to make sure the 12400 * sprite gets enabled on the next vblank after the register write. 12401 * Doing otherwise would risk enabling the sprite one frame after 12402 * we've already signalled flip completion. We can resume LP1+ 12403 * once the sprite has been enabled. 12404 * 12405 * 12406 * WaCxSRDisabledForSpriteScaling:ivb 12407 * IVB SPR_SCALE/Scaling Enable 12408 * "Low Power watermarks must be disabled for at least one 12409 * frame before enabling sprite scaling, and kept disabled 12410 * until sprite scaling is disabled." 12411 * 12412 * ILK/SNB DVSASCALE/Scaling Enable 12413 * "When in Self Refresh Big FIFO mode, scaling enable will be 12414 * masked off while Big FIFO mode is exiting." 12415 * 12416 * Despite the w/a only being listed for IVB we assume that 12417 * the ILK/SNB note has similar ramifications, hence we apply 12418 * the w/a on all three platforms. 12419 * 12420 * Experimental results suggest this is needed also for the primary 12421 * plane, not only the sprite plane.
12422 */ 12423 if (plane->id != PLANE_CURSOR && 12424 (IS_GEN_RANGE(dev_priv, 5, 6) || 12425 IS_IVYBRIDGE(dev_priv)) && 12426 (turn_on || (!needs_scaling(old_plane_state) && 12427 needs_scaling(plane_state)))) 12428 crtc_state->disable_lp_wm = true; 12429 12430 return 0; 12431 } 12432 12433 static bool encoders_cloneable(const struct intel_encoder *a, 12434 const struct intel_encoder *b) 12435 { 12436 /* masks could be asymmetric, so check both ways */ 12437 return a == b || (a->cloneable & (1 << b->type) && 12438 b->cloneable & (1 << a->type)); 12439 } 12440 12441 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 12442 struct intel_crtc *crtc, 12443 struct intel_encoder *encoder) 12444 { 12445 struct intel_encoder *source_encoder; 12446 struct drm_connector *connector; 12447 struct drm_connector_state *connector_state; 12448 int i; 12449 12450 for_each_new_connector_in_state(state, connector, connector_state, i) { 12451 if (connector_state->crtc != &crtc->base) 12452 continue; 12453 12454 source_encoder = 12455 to_intel_encoder(connector_state->best_encoder); 12456 if (!encoders_cloneable(encoder, source_encoder)) 12457 return false; 12458 } 12459 12460 return true; 12461 } 12462 12463 static int icl_add_linked_planes(struct intel_atomic_state *state) 12464 { 12465 struct intel_plane *plane, *linked; 12466 struct intel_plane_state *plane_state, *linked_plane_state; 12467 int i; 12468 12469 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12470 linked = plane_state->planar_linked_plane; 12471 12472 if (!linked) 12473 continue; 12474 12475 linked_plane_state = intel_atomic_get_plane_state(state, linked); 12476 if (IS_ERR(linked_plane_state)) 12477 return PTR_ERR(linked_plane_state); 12478 12479 drm_WARN_ON(state->base.dev, 12480 linked_plane_state->planar_linked_plane != plane); 12481 drm_WARN_ON(state->base.dev, 12482 linked_plane_state->planar_slave == plane_state->planar_slave); 12483 } 12484 12485 return 0; 12486 } 12487 12488 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) 12489 { 12490 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 12491 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12492 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 12493 struct intel_plane *plane, *linked; 12494 struct intel_plane_state *plane_state; 12495 int i; 12496 12497 if (INTEL_GEN(dev_priv) < 11) 12498 return 0; 12499 12500 /* 12501 * Destroy all old plane links and make the slave plane invisible 12502 * in the crtc_state->active_planes mask. 
12503 */ 12504 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12505 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 12506 continue; 12507 12508 plane_state->planar_linked_plane = NULL; 12509 if (plane_state->planar_slave && !plane_state->uapi.visible) { 12510 crtc_state->active_planes &= ~BIT(plane->id); 12511 crtc_state->update_planes |= BIT(plane->id); 12512 } 12513 12514 plane_state->planar_slave = false; 12515 } 12516 12517 if (!crtc_state->nv12_planes) 12518 return 0; 12519 12520 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 12521 struct intel_plane_state *linked_state = NULL; 12522 12523 if (plane->pipe != crtc->pipe || 12524 !(crtc_state->nv12_planes & BIT(plane->id))) 12525 continue; 12526 12527 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 12528 if (!icl_is_nv12_y_plane(dev_priv, linked->id)) 12529 continue; 12530 12531 if (crtc_state->active_planes & BIT(linked->id)) 12532 continue; 12533 12534 linked_state = intel_atomic_get_plane_state(state, linked); 12535 if (IS_ERR(linked_state)) 12536 return PTR_ERR(linked_state); 12537 12538 break; 12539 } 12540 12541 if (!linked_state) { 12542 drm_dbg_kms(&dev_priv->drm, 12543 "Need %d free Y planes for planar YUV\n", 12544 hweight8(crtc_state->nv12_planes)); 12545 12546 return -EINVAL; 12547 } 12548 12549 plane_state->planar_linked_plane = linked; 12550 12551 linked_state->planar_slave = true; 12552 linked_state->planar_linked_plane = plane; 12553 crtc_state->active_planes |= BIT(linked->id); 12554 crtc_state->update_planes |= BIT(linked->id); 12555 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", 12556 linked->base.name, plane->base.name); 12557 12558 /* Copy parameters to slave plane */ 12559 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 12560 linked_state->color_ctl = plane_state->color_ctl; 12561 linked_state->view = plane_state->view; 12562 memcpy(linked_state->color_plane, plane_state->color_plane, 12563 sizeof(linked_state->color_plane)); 12564 12565 intel_plane_copy_uapi_to_hw_state(linked_state, plane_state); 12566 linked_state->uapi.src = plane_state->uapi.src; 12567 linked_state->uapi.dst = plane_state->uapi.dst; 12568 12569 if (icl_is_hdr_plane(dev_priv, plane->id)) { 12570 if (linked->id == PLANE_SPRITE5) 12571 plane_state->cus_ctl |= PLANE_CUS_PLANE_7; 12572 else if (linked->id == PLANE_SPRITE4) 12573 plane_state->cus_ctl |= PLANE_CUS_PLANE_6; 12574 else if (linked->id == PLANE_SPRITE3) 12575 plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL; 12576 else if (linked->id == PLANE_SPRITE2) 12577 plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL; 12578 else 12579 MISSING_CASE(linked->id); 12580 } 12581 } 12582 12583 return 0; 12584 } 12585 12586 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 12587 { 12588 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 12589 struct intel_atomic_state *state = 12590 to_intel_atomic_state(new_crtc_state->uapi.state); 12591 const struct intel_crtc_state *old_crtc_state = 12592 intel_atomic_get_old_crtc_state(state, crtc); 12593 12594 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 12595 } 12596 12597 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) 12598 { 12599 const struct drm_display_mode *adjusted_mode = 12600 &crtc_state->hw.adjusted_mode; 12601 int linetime_wm; 12602 12603 if (!crtc_state->hw.enable) 12604 return 0; 12605 12606 linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 12607 
adjusted_mode->crtc_clock); 12608 12609 return min(linetime_wm, 0x1ff); 12610 } 12611 12612 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, 12613 const struct intel_cdclk_state *cdclk_state) 12614 { 12615 const struct drm_display_mode *adjusted_mode = 12616 &crtc_state->hw.adjusted_mode; 12617 int linetime_wm; 12618 12619 if (!crtc_state->hw.enable) 12620 return 0; 12621 12622 linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 12623 cdclk_state->logical.cdclk); 12624 12625 return min(linetime_wm, 0x1ff); 12626 } 12627 12628 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) 12629 { 12630 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 12631 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12632 const struct drm_display_mode *adjusted_mode = 12633 &crtc_state->hw.adjusted_mode; 12634 int linetime_wm; 12635 12636 if (!crtc_state->hw.enable) 12637 return 0; 12638 12639 linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8, 12640 crtc_state->pixel_rate); 12641 12642 /* Display WA #1135: BXT:ALL GLK:ALL */ 12643 if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled) 12644 linetime_wm /= 2; 12645 12646 return min(linetime_wm, 0x1ff); 12647 } 12648 12649 static int hsw_compute_linetime_wm(struct intel_atomic_state *state, 12650 struct intel_crtc *crtc) 12651 { 12652 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12653 struct intel_crtc_state *crtc_state = 12654 intel_atomic_get_new_crtc_state(state, crtc); 12655 const struct intel_cdclk_state *cdclk_state; 12656 12657 if (INTEL_GEN(dev_priv) >= 9) 12658 crtc_state->linetime = skl_linetime_wm(crtc_state); 12659 else 12660 crtc_state->linetime = hsw_linetime_wm(crtc_state); 12661 12662 if (!hsw_crtc_supports_ips(crtc)) 12663 return 0; 12664 12665 cdclk_state = intel_atomic_get_cdclk_state(state); 12666 if (IS_ERR(cdclk_state)) 12667 return PTR_ERR(cdclk_state); 12668 12669 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, 12670 cdclk_state); 12671 12672 return 0; 12673 } 12674 12675 static int intel_crtc_atomic_check(struct intel_atomic_state *state, 12676 struct intel_crtc *crtc) 12677 { 12678 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12679 struct intel_crtc_state *crtc_state = 12680 intel_atomic_get_new_crtc_state(state, crtc); 12681 bool mode_changed = needs_modeset(crtc_state); 12682 int ret; 12683 12684 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && 12685 mode_changed && !crtc_state->hw.active) 12686 crtc_state->update_wm_post = true; 12687 12688 if (mode_changed && crtc_state->hw.enable && 12689 dev_priv->display.crtc_compute_clock && 12690 !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { 12691 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); 12692 if (ret) 12693 return ret; 12694 } 12695 12696 /* 12697 * May need to update pipe gamma enable bits 12698 * when C8 planes are getting enabled/disabled. 
12699 */ 12700 if (c8_planes_changed(crtc_state)) 12701 crtc_state->uapi.color_mgmt_changed = true; 12702 12703 if (mode_changed || crtc_state->update_pipe || 12704 crtc_state->uapi.color_mgmt_changed) { 12705 ret = intel_color_check(crtc_state); 12706 if (ret) 12707 return ret; 12708 } 12709 12710 if (dev_priv->display.compute_pipe_wm) { 12711 ret = dev_priv->display.compute_pipe_wm(crtc_state); 12712 if (ret) { 12713 drm_dbg_kms(&dev_priv->drm, 12714 "Target pipe watermarks are invalid\n"); 12715 return ret; 12716 } 12717 } 12718 12719 if (dev_priv->display.compute_intermediate_wm) { 12720 if (drm_WARN_ON(&dev_priv->drm, 12721 !dev_priv->display.compute_pipe_wm)) 12722 return 0; 12723 12724 /* 12725 * Calculate 'intermediate' watermarks that satisfy both the 12726 * old state and the new state. We can program these 12727 * immediately. 12728 */ 12729 ret = dev_priv->display.compute_intermediate_wm(crtc_state); 12730 if (ret) { 12731 drm_dbg_kms(&dev_priv->drm, 12732 "No valid intermediate pipe watermarks are possible\n"); 12733 return ret; 12734 } 12735 } 12736 12737 if (INTEL_GEN(dev_priv) >= 9) { 12738 if (mode_changed || crtc_state->update_pipe) { 12739 ret = skl_update_scaler_crtc(crtc_state); 12740 if (ret) 12741 return ret; 12742 } 12743 12744 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state); 12745 if (ret) 12746 return ret; 12747 } 12748 12749 if (HAS_IPS(dev_priv)) { 12750 ret = hsw_compute_ips_config(crtc_state); 12751 if (ret) 12752 return ret; 12753 } 12754 12755 if (INTEL_GEN(dev_priv) >= 9 || 12756 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 12757 ret = hsw_compute_linetime_wm(state, crtc); 12758 if (ret) 12759 return ret; 12760 12761 } 12762 12763 return 0; 12764 } 12765 12766 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 12767 { 12768 struct intel_connector *connector; 12769 struct drm_connector_list_iter conn_iter; 12770 12771 drm_connector_list_iter_begin(dev, &conn_iter); 12772 for_each_intel_connector_iter(connector, &conn_iter) { 12773 if (connector->base.state->crtc) 12774 drm_connector_put(&connector->base); 12775 12776 if (connector->base.encoder) { 12777 connector->base.state->best_encoder = 12778 connector->base.encoder; 12779 connector->base.state->crtc = 12780 connector->base.encoder->crtc; 12781 12782 drm_connector_get(&connector->base); 12783 } else { 12784 connector->base.state->best_encoder = NULL; 12785 connector->base.state->crtc = NULL; 12786 } 12787 } 12788 drm_connector_list_iter_end(&conn_iter); 12789 } 12790 12791 static int 12792 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 12793 struct intel_crtc_state *pipe_config) 12794 { 12795 struct drm_connector *connector = conn_state->connector; 12796 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); 12797 const struct drm_display_info *info = &connector->display_info; 12798 int bpp; 12799 12800 switch (conn_state->max_bpc) { 12801 case 6 ... 7: 12802 bpp = 6 * 3; 12803 break; 12804 case 8 ... 9: 12805 bpp = 8 * 3; 12806 break; 12807 case 10 ... 
11: 12808 bpp = 10 * 3; 12809 break; 12810 case 12: 12811 bpp = 12 * 3; 12812 break; 12813 default: 12814 return -EINVAL; 12815 } 12816 12817 if (bpp < pipe_config->pipe_bpp) { 12818 drm_dbg_kms(&i915->drm, 12819 "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " 12820 "EDID bpp %d, requested bpp %d, max platform bpp %d\n", 12821 connector->base.id, connector->name, 12822 bpp, 3 * info->bpc, 12823 3 * conn_state->max_requested_bpc, 12824 pipe_config->pipe_bpp); 12825 12826 pipe_config->pipe_bpp = bpp; 12827 } 12828 12829 return 0; 12830 } 12831 12832 static int 12833 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 12834 struct intel_crtc_state *pipe_config) 12835 { 12836 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12837 struct drm_atomic_state *state = pipe_config->uapi.state; 12838 struct drm_connector *connector; 12839 struct drm_connector_state *connector_state; 12840 int bpp, i; 12841 12842 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 12843 IS_CHERRYVIEW(dev_priv))) 12844 bpp = 10*3; 12845 else if (INTEL_GEN(dev_priv) >= 5) 12846 bpp = 12*3; 12847 else 12848 bpp = 8*3; 12849 12850 pipe_config->pipe_bpp = bpp; 12851 12852 /* Clamp display bpp to connector max bpp */ 12853 for_each_new_connector_in_state(state, connector, connector_state, i) { 12854 int ret; 12855 12856 if (connector_state->crtc != &crtc->base) 12857 continue; 12858 12859 ret = compute_sink_pipe_bpp(connector_state, pipe_config); 12860 if (ret) 12861 return ret; 12862 } 12863 12864 return 0; 12865 } 12866 12867 static void intel_dump_crtc_timings(struct drm_i915_private *i915, 12868 const struct drm_display_mode *mode) 12869 { 12870 drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, " 12871 "type: 0x%x flags: 0x%x\n", 12872 mode->crtc_clock, 12873 mode->crtc_hdisplay, mode->crtc_hsync_start, 12874 mode->crtc_hsync_end, mode->crtc_htotal, 12875 mode->crtc_vdisplay, mode->crtc_vsync_start, 12876 mode->crtc_vsync_end, mode->crtc_vtotal, 12877 mode->type, mode->flags); 12878 } 12879 12880 static void 12881 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, 12882 const char *id, unsigned int lane_count, 12883 const struct intel_link_m_n *m_n) 12884 { 12885 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); 12886 12887 drm_dbg_kms(&i915->drm, 12888 "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 12889 id, lane_count, 12890 m_n->gmch_m, m_n->gmch_n, 12891 m_n->link_m, m_n->link_n, m_n->tu); 12892 } 12893 12894 static void 12895 intel_dump_infoframe(struct drm_i915_private *dev_priv, 12896 const union hdmi_infoframe *frame) 12897 { 12898 if (!drm_debug_enabled(DRM_UT_KMS)) 12899 return; 12900 12901 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); 12902 } 12903 12904 static void 12905 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv, 12906 const struct drm_dp_vsc_sdp *vsc) 12907 { 12908 if (!drm_debug_enabled(DRM_UT_KMS)) 12909 return; 12910 12911 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc); 12912 } 12913 12914 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x 12915 12916 static const char * const output_type_str[] = { 12917 OUTPUT_TYPE(UNUSED), 12918 OUTPUT_TYPE(ANALOG), 12919 OUTPUT_TYPE(DVO), 12920 OUTPUT_TYPE(SDVO), 12921 OUTPUT_TYPE(LVDS), 12922 OUTPUT_TYPE(TVOUT), 12923 OUTPUT_TYPE(HDMI), 12924 OUTPUT_TYPE(DP), 12925 OUTPUT_TYPE(EDP), 12926 OUTPUT_TYPE(DSI), 12927 OUTPUT_TYPE(DDI), 12928 OUTPUT_TYPE(DP_MST), 12929 }; 12930 12931 #undef OUTPUT_TYPE 12932 12933 static void 
snprintf_output_types(char *buf, size_t len, 12934 unsigned int output_types) 12935 { 12936 char *str = buf; 12937 int i; 12938 12939 str[0] = '\0'; 12940 12941 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) { 12942 int r; 12943 12944 if ((output_types & BIT(i)) == 0) 12945 continue; 12946 12947 r = snprintf(str, len, "%s%s", 12948 str != buf ? "," : "", output_type_str[i]); 12949 if (r >= len) 12950 break; 12951 str += r; 12952 len -= r; 12953 12954 output_types &= ~BIT(i); 12955 } 12956 12957 WARN_ON_ONCE(output_types != 0); 12958 } 12959 12960 static const char * const output_format_str[] = { 12961 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", 12962 [INTEL_OUTPUT_FORMAT_RGB] = "RGB", 12963 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", 12964 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", 12965 }; 12966 12967 static const char *output_formats(enum intel_output_format format) 12968 { 12969 if (format >= ARRAY_SIZE(output_format_str)) 12970 format = INTEL_OUTPUT_FORMAT_INVALID; 12971 return output_format_str[format]; 12972 } 12973 12974 static void intel_dump_plane_state(const struct intel_plane_state *plane_state) 12975 { 12976 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 12977 struct drm_i915_private *i915 = to_i915(plane->base.dev); 12978 const struct drm_framebuffer *fb = plane_state->hw.fb; 12979 struct drm_format_name_buf format_name; 12980 12981 if (!fb) { 12982 drm_dbg_kms(&i915->drm, 12983 "[PLANE:%d:%s] fb: [NOFB], visible: %s\n", 12984 plane->base.base.id, plane->base.name, 12985 yesno(plane_state->uapi.visible)); 12986 return; 12987 } 12988 12989 drm_dbg_kms(&i915->drm, 12990 "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n", 12991 plane->base.base.id, plane->base.name, 12992 fb->base.id, fb->width, fb->height, 12993 drm_get_format_name(fb->format->format, &format_name), 12994 yesno(plane_state->uapi.visible)); 12995 drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n", 12996 plane_state->hw.rotation, plane_state->scaler_id); 12997 if (plane_state->uapi.visible) 12998 drm_dbg_kms(&i915->drm, 12999 "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", 13000 DRM_RECT_FP_ARG(&plane_state->uapi.src), 13001 DRM_RECT_ARG(&plane_state->uapi.dst)); 13002 } 13003 13004 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, 13005 struct intel_atomic_state *state, 13006 const char *context) 13007 { 13008 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 13009 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13010 const struct intel_plane_state *plane_state; 13011 struct intel_plane *plane; 13012 char buf[64]; 13013 int i; 13014 13015 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n", 13016 crtc->base.base.id, crtc->base.name, 13017 yesno(pipe_config->hw.enable), context); 13018 13019 if (!pipe_config->hw.enable) 13020 goto dump_planes; 13021 13022 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); 13023 drm_dbg_kms(&dev_priv->drm, 13024 "active: %s, output_types: %s (0x%x), output format: %s\n", 13025 yesno(pipe_config->hw.active), 13026 buf, pipe_config->output_types, 13027 output_formats(pipe_config->output_format)); 13028 13029 drm_dbg_kms(&dev_priv->drm, 13030 "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 13031 transcoder_name(pipe_config->cpu_transcoder), 13032 pipe_config->pipe_bpp, pipe_config->dither); 13033 13034 drm_dbg_kms(&dev_priv->drm, 13035 "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n", 13036 
transcoder_name(pipe_config->master_transcoder), 13037 pipe_config->sync_mode_slaves_mask); 13038 13039 if (pipe_config->has_pch_encoder) 13040 intel_dump_m_n_config(pipe_config, "fdi", 13041 pipe_config->fdi_lanes, 13042 &pipe_config->fdi_m_n); 13043 13044 if (intel_crtc_has_dp_encoder(pipe_config)) { 13045 intel_dump_m_n_config(pipe_config, "dp m_n", 13046 pipe_config->lane_count, &pipe_config->dp_m_n); 13047 if (pipe_config->has_drrs) 13048 intel_dump_m_n_config(pipe_config, "dp m2_n2", 13049 pipe_config->lane_count, 13050 &pipe_config->dp_m2_n2); 13051 } 13052 13053 drm_dbg_kms(&dev_priv->drm, 13054 "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", 13055 pipe_config->has_audio, pipe_config->has_infoframe, 13056 pipe_config->infoframes.enable); 13057 13058 if (pipe_config->infoframes.enable & 13059 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) 13060 drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n", 13061 pipe_config->infoframes.gcp); 13062 if (pipe_config->infoframes.enable & 13063 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) 13064 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); 13065 if (pipe_config->infoframes.enable & 13066 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) 13067 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd); 13068 if (pipe_config->infoframes.enable & 13069 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) 13070 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); 13071 if (pipe_config->infoframes.enable & 13072 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM)) 13073 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm); 13074 if (pipe_config->infoframes.enable & 13075 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA)) 13076 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm); 13077 if (pipe_config->infoframes.enable & 13078 intel_hdmi_infoframe_enable(DP_SDP_VSC)) 13079 intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc); 13080 13081 drm_dbg_kms(&dev_priv->drm, "requested mode:\n"); 13082 drm_mode_debug_printmodeline(&pipe_config->hw.mode); 13083 drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n"); 13084 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode); 13085 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode); 13086 drm_dbg_kms(&dev_priv->drm, 13087 "port clock: %d, pipe src size: %dx%d, pixel rate %d\n", 13088 pipe_config->port_clock, 13089 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 13090 pipe_config->pixel_rate); 13091 13092 drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n", 13093 pipe_config->linetime, pipe_config->ips_linetime); 13094 13095 if (INTEL_GEN(dev_priv) >= 9) 13096 drm_dbg_kms(&dev_priv->drm, 13097 "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 13098 crtc->num_scalers, 13099 pipe_config->scaler_state.scaler_users, 13100 pipe_config->scaler_state.scaler_id); 13101 13102 if (HAS_GMCH(dev_priv)) 13103 drm_dbg_kms(&dev_priv->drm, 13104 "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 13105 pipe_config->gmch_pfit.control, 13106 pipe_config->gmch_pfit.pgm_ratios, 13107 pipe_config->gmch_pfit.lvds_border_bits); 13108 else 13109 drm_dbg_kms(&dev_priv->drm, 13110 "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n", 13111 DRM_RECT_ARG(&pipe_config->pch_pfit.dst), 13112 enableddisabled(pipe_config->pch_pfit.enabled), 13113 yesno(pipe_config->pch_pfit.force_thru)); 13114 13115 drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n", 13116 pipe_config->ips_enabled, 
pipe_config->double_wide); 13117 13118 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); 13119 13120 if (IS_CHERRYVIEW(dev_priv)) 13121 drm_dbg_kms(&dev_priv->drm, 13122 "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 13123 pipe_config->cgm_mode, pipe_config->gamma_mode, 13124 pipe_config->gamma_enable, pipe_config->csc_enable); 13125 else 13126 drm_dbg_kms(&dev_priv->drm, 13127 "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", 13128 pipe_config->csc_mode, pipe_config->gamma_mode, 13129 pipe_config->gamma_enable, pipe_config->csc_enable); 13130 13131 drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n", 13132 transcoder_name(pipe_config->mst_master_transcoder)); 13133 13134 dump_planes: 13135 if (!state) 13136 return; 13137 13138 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 13139 if (plane->pipe == crtc->pipe) 13140 intel_dump_plane_state(plane_state); 13141 } 13142 } 13143 13144 static bool check_digital_port_conflicts(struct intel_atomic_state *state) 13145 { 13146 struct drm_device *dev = state->base.dev; 13147 struct drm_connector *connector; 13148 struct drm_connector_list_iter conn_iter; 13149 unsigned int used_ports = 0; 13150 unsigned int used_mst_ports = 0; 13151 bool ret = true; 13152 13153 /* 13154 * We're going to peek into connector->state, 13155 * hence connection_mutex must be held. 13156 */ 13157 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 13158 13159 /* 13160 * Walk the connector list instead of the encoder 13161 * list to detect the problem on ddi platforms 13162 * where there's just one encoder per digital port. 13163 */ 13164 drm_connector_list_iter_begin(dev, &conn_iter); 13165 drm_for_each_connector_iter(connector, &conn_iter) { 13166 struct drm_connector_state *connector_state; 13167 struct intel_encoder *encoder; 13168 13169 connector_state = 13170 drm_atomic_get_new_connector_state(&state->base, 13171 connector); 13172 if (!connector_state) 13173 connector_state = connector->state; 13174 13175 if (!connector_state->best_encoder) 13176 continue; 13177 13178 encoder = to_intel_encoder(connector_state->best_encoder); 13179 13180 drm_WARN_ON(dev, !connector_state->crtc); 13181 13182 switch (encoder->type) { 13183 case INTEL_OUTPUT_DDI: 13184 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) 13185 break; 13186 fallthrough; 13187 case INTEL_OUTPUT_DP: 13188 case INTEL_OUTPUT_HDMI: 13189 case INTEL_OUTPUT_EDP: 13190 /* the same port mustn't appear more than once */ 13191 if (used_ports & BIT(encoder->port)) 13192 ret = false; 13193 13194 used_ports |= BIT(encoder->port); 13195 break; 13196 case INTEL_OUTPUT_DP_MST: 13197 used_mst_ports |= 13198 1 << encoder->port; 13199 break; 13200 default: 13201 break; 13202 } 13203 } 13204 drm_connector_list_iter_end(&conn_iter); 13205 13206 /* can't mix MST and SST/HDMI on the same port */ 13207 if (used_ports & used_mst_ports) 13208 return false; 13209 13210 return ret; 13211 } 13212 13213 static void 13214 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state) 13215 { 13216 intel_crtc_copy_color_blobs(crtc_state); 13217 } 13218 13219 static void 13220 intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state) 13221 { 13222 crtc_state->hw.enable = crtc_state->uapi.enable; 13223 crtc_state->hw.active = crtc_state->uapi.active; 13224 crtc_state->hw.mode = crtc_state->uapi.mode; 13225 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode; 13226 
intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
13227 }
13228
13229 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
13230 {
13231 crtc_state->uapi.enable = crtc_state->hw.enable;
13232 crtc_state->uapi.active = crtc_state->hw.active;
13233 drm_WARN_ON(crtc_state->uapi.crtc->dev,
13234 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
13235
13236 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
13237
13238 /* copy color blobs to uapi */
13239 drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
13240 crtc_state->hw.degamma_lut);
13241 drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
13242 crtc_state->hw.gamma_lut);
13243 drm_property_replace_blob(&crtc_state->uapi.ctm,
13244 crtc_state->hw.ctm);
13245 }
13246
13247 static int
13248 intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
13249 {
13250 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13251 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13252 struct intel_crtc_state *saved_state;
13253
13254 saved_state = intel_crtc_state_alloc(crtc);
13255 if (!saved_state)
13256 return -ENOMEM;
13257
13258 /* free the old crtc_state->hw members */
13259 intel_crtc_free_hw_state(crtc_state);
13260
13261 /* FIXME: before the switch to atomic started, a new pipe_config was
13262 * kzalloc'd. Code that depends on any field being zero should be
13263 * fixed, so that the crtc_state can be safely duplicated. For now,
13264 * only fields that are known not to cause problems are preserved. */
13265
13266 saved_state->uapi = crtc_state->uapi;
13267 saved_state->scaler_state = crtc_state->scaler_state;
13268 saved_state->shared_dpll = crtc_state->shared_dpll;
13269 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
13270 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
13271 sizeof(saved_state->icl_port_dplls));
13272 saved_state->crc_enabled = crtc_state->crc_enabled;
13273 if (IS_G4X(dev_priv) ||
13274 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13275 saved_state->wm = crtc_state->wm;
13276
13277 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
13278 kfree(saved_state);
13279
13280 intel_crtc_copy_uapi_to_hw_state(crtc_state);
13281
13282 return 0;
13283 }
13284
13285 static int
13286 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
13287 {
13288 struct drm_crtc *crtc = pipe_config->uapi.crtc;
13289 struct drm_atomic_state *state = pipe_config->uapi.state;
13290 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
13291 struct drm_connector *connector;
13292 struct drm_connector_state *connector_state;
13293 int base_bpp, ret, i;
13294 bool retry = true;
13295
13296 pipe_config->cpu_transcoder =
13297 (enum transcoder) to_intel_crtc(crtc)->pipe;
13298
13299 /*
13300 * Sanitize sync polarity flags based on requested ones. If neither
13301 * positive nor negative polarity is requested, treat this as meaning
13302 * negative polarity.
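 * For example (illustrative, not from the spec): a mode that requests only
 * DRM_MODE_FLAG_PHSYNC keeps its positive hsync and has DRM_MODE_FLAG_NVSYNC
 * added below for the unspecified vertical direction.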
13303 */
13304 if (!(pipe_config->hw.adjusted_mode.flags &
13305 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
13306 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
13307
13308 if (!(pipe_config->hw.adjusted_mode.flags &
13309 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
13310 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
13311
13312 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
13313 pipe_config);
13314 if (ret)
13315 return ret;
13316
13317 base_bpp = pipe_config->pipe_bpp;
13318
13319 /*
13320 * Determine the real pipe dimensions. Note that stereo modes can
13321 * increase the actual pipe size due to the frame doubling and
13322 * insertion of additional space for blanks between the frames. This
13323 * is stored in the crtc timings. We use the requested mode to do this
13324 * computation to clearly distinguish it from the adjusted mode, which
13325 * can be changed by the connectors in the retry loop below.
13326 */
13327 drm_mode_get_hv_timing(&pipe_config->hw.mode,
13328 &pipe_config->pipe_src_w,
13329 &pipe_config->pipe_src_h);
13330
13331 for_each_new_connector_in_state(state, connector, connector_state, i) {
13332 struct intel_encoder *encoder =
13333 to_intel_encoder(connector_state->best_encoder);
13334
13335 if (connector_state->crtc != crtc)
13336 continue;
13337
13338 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
13339 drm_dbg_kms(&i915->drm,
13340 "rejecting invalid cloning configuration\n");
13341 return -EINVAL;
13342 }
13343
13344 /*
13345 * Determine output_types before calling the .compute_config()
13346 * hooks so that the hooks can use this information safely.
13347 */
13348 if (encoder->compute_output_type)
13349 pipe_config->output_types |=
13350 BIT(encoder->compute_output_type(encoder, pipe_config,
13351 connector_state));
13352 else
13353 pipe_config->output_types |= BIT(encoder->type);
13354 }
13355
13356 encoder_retry:
13357 /* Ensure the port clock defaults are reset when retrying. */
13358 pipe_config->port_clock = 0;
13359 pipe_config->pixel_multiplier = 1;
13360
13361 /* Fill in default crtc timings, allow encoders to overwrite them. */
13362 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
13363 CRTC_STEREO_DOUBLE);
13364
13365 /* Pass our mode to the connectors and the CRTC to give them a chance to
13366 * adjust it according to limitations or connector properties, and also
13367 * a chance to reject the mode entirely.
13368 */
13369 for_each_new_connector_in_state(state, connector, connector_state, i) {
13370 struct intel_encoder *encoder =
13371 to_intel_encoder(connector_state->best_encoder);
13372
13373 if (connector_state->crtc != crtc)
13374 continue;
13375
13376 ret = encoder->compute_config(encoder, pipe_config,
13377 connector_state);
13378 if (ret < 0) {
13379 if (ret != -EDEADLK)
13380 drm_dbg_kms(&i915->drm,
13381 "Encoder config failure: %d\n",
13382 ret);
13383 return ret;
13384 }
13385 }
13386
13387 /* Set default port clock if not overwritten by the encoder. Needs to be
13388 * done afterwards in case the encoder adjusts the mode.
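 * As a sketch (example numbers, not taken from the code): with no encoder
 * override, a 148500 kHz adjusted mode and pixel_multiplier == 1 yield
 * port_clock == 148500, while an output that doubles low pixel clocks
 * (pixel_multiplier == 2) at 25175 kHz would yield port_clock == 50350.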
*/
13389 if (!pipe_config->port_clock)
13390 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
13391 * pipe_config->pixel_multiplier;
13392
13393 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
13394 if (ret == -EDEADLK)
13395 return ret;
13396 if (ret < 0) {
13397 drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
13398 return ret;
13399 }
13400
13401 if (ret == RETRY) {
13402 if (drm_WARN(&i915->drm, !retry,
13403 "loop in pipe configuration computation\n"))
13404 return -EINVAL;
13405
13406 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
13407 retry = false;
13408 goto encoder_retry;
13409 }
13410
13411 /* Dithering seems not to pass bits through correctly when it should, so
13412 * only enable it on 6bpc panels and when it's not a compliance
13413 * test requesting a 6bpc video pattern.
13414 */
13415 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
13416 !pipe_config->dither_force_disable;
13417 drm_dbg_kms(&i915->drm,
13418 "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
13419 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
13420
13421 /*
13422 * Make drm_calc_timestamping_constants in
13423 * drm_atomic_helper_update_legacy_modeset_state() happy
13424 */
13425 pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;
13426
13427 return 0;
13428 }
13429
13430 static int
13431 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
13432 {
13433 struct intel_atomic_state *state =
13434 to_intel_atomic_state(crtc_state->uapi.state);
13435 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13436 struct drm_connector_state *conn_state;
13437 struct drm_connector *connector;
13438 int i;
13439
13440 for_each_new_connector_in_state(&state->base, connector,
13441 conn_state, i) {
13442 struct intel_encoder *encoder =
13443 to_intel_encoder(conn_state->best_encoder);
13444 int ret;
13445
13446 if (conn_state->crtc != &crtc->base ||
13447 !encoder->compute_config_late)
13448 continue;
13449
13450 ret = encoder->compute_config_late(encoder, crtc_state,
13451 conn_state);
13452 if (ret)
13453 return ret;
13454 }
13455
13456 return 0;
13457 }
13458
13459 bool intel_fuzzy_clock_check(int clock1, int clock2)
13460 {
13461 int diff;
13462
13463 if (clock1 == clock2)
13464 return true;
13465
13466 if (!clock1 || !clock2)
13467 return false;
13468
13469 diff = abs(clock1 - clock2);
13470
13471 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
13472 return true;
13473
13474 return false;
13475 }
13476
13477 static bool
13478 intel_compare_m_n(unsigned int m, unsigned int n,
13479 unsigned int m2, unsigned int n2,
13480 bool exact)
13481 {
13482 if (m == m2 && n == n2)
13483 return true;
13484
13485 if (exact || !m || !n || !m2 || !n2)
13486 return false;
13487
13488 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
13489
13490 if (n > n2) {
13491 while (n > n2) {
13492 m2 <<= 1;
13493 n2 <<= 1;
13494 }
13495 } else if (n < n2) {
13496 while (n < n2) {
13497 m <<= 1;
13498 n <<= 1;
13499 }
13500 }
13501
13502 if (n != n2)
13503 return false;
13504
13505 return intel_fuzzy_clock_check(m, m2);
13506 }
13507
13508 static bool
13509 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
13510 const struct intel_link_m_n *m2_n2,
13511 bool exact)
13512 {
13513 return m_n->tu == m2_n2->tu &&
13514 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
13515 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
13516 intel_compare_m_n(m_n->link_m, m_n->link_n,
13517 m2_n2->link_m, m2_n2->link_n, exact);
13518 }
13519
13520 static
bool 13521 intel_compare_infoframe(const union hdmi_infoframe *a, 13522 const union hdmi_infoframe *b) 13523 { 13524 return memcmp(a, b, sizeof(*a)) == 0; 13525 } 13526 13527 static bool 13528 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, 13529 const struct drm_dp_vsc_sdp *b) 13530 { 13531 return memcmp(a, b, sizeof(*a)) == 0; 13532 } 13533 13534 static void 13535 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, 13536 bool fastset, const char *name, 13537 const union hdmi_infoframe *a, 13538 const union hdmi_infoframe *b) 13539 { 13540 if (fastset) { 13541 if (!drm_debug_enabled(DRM_UT_KMS)) 13542 return; 13543 13544 drm_dbg_kms(&dev_priv->drm, 13545 "fastset mismatch in %s infoframe\n", name); 13546 drm_dbg_kms(&dev_priv->drm, "expected:\n"); 13547 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); 13548 drm_dbg_kms(&dev_priv->drm, "found:\n"); 13549 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); 13550 } else { 13551 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name); 13552 drm_err(&dev_priv->drm, "expected:\n"); 13553 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); 13554 drm_err(&dev_priv->drm, "found:\n"); 13555 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); 13556 } 13557 } 13558 13559 static void 13560 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv, 13561 bool fastset, const char *name, 13562 const struct drm_dp_vsc_sdp *a, 13563 const struct drm_dp_vsc_sdp *b) 13564 { 13565 if (fastset) { 13566 if (!drm_debug_enabled(DRM_UT_KMS)) 13567 return; 13568 13569 drm_dbg_kms(&dev_priv->drm, 13570 "fastset mismatch in %s dp sdp\n", name); 13571 drm_dbg_kms(&dev_priv->drm, "expected:\n"); 13572 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a); 13573 drm_dbg_kms(&dev_priv->drm, "found:\n"); 13574 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b); 13575 } else { 13576 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name); 13577 drm_err(&dev_priv->drm, "expected:\n"); 13578 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a); 13579 drm_err(&dev_priv->drm, "found:\n"); 13580 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b); 13581 } 13582 } 13583 13584 static void __printf(4, 5) 13585 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, 13586 const char *name, const char *format, ...) 
13587 { 13588 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 13589 struct va_format vaf; 13590 va_list args; 13591 13592 va_start(args, format); 13593 vaf.fmt = format; 13594 vaf.va = &args; 13595 13596 if (fastset) 13597 drm_dbg_kms(&i915->drm, 13598 "[CRTC:%d:%s] fastset mismatch in %s %pV\n", 13599 crtc->base.base.id, crtc->base.name, name, &vaf); 13600 else 13601 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n", 13602 crtc->base.base.id, crtc->base.name, name, &vaf); 13603 13604 va_end(args); 13605 } 13606 13607 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 13608 { 13609 if (dev_priv->params.fastboot != -1) 13610 return dev_priv->params.fastboot; 13611 13612 /* Enable fastboot by default on Skylake and newer */ 13613 if (INTEL_GEN(dev_priv) >= 9) 13614 return true; 13615 13616 /* Enable fastboot by default on VLV and CHV */ 13617 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13618 return true; 13619 13620 /* Disabled by default on all others */ 13621 return false; 13622 } 13623 13624 static bool 13625 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 13626 const struct intel_crtc_state *pipe_config, 13627 bool fastset) 13628 { 13629 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); 13630 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 13631 bool ret = true; 13632 u32 bp_gamma = 0; 13633 bool fixup_inherited = fastset && 13634 current_config->inherited && !pipe_config->inherited; 13635 13636 if (fixup_inherited && !fastboot_enabled(dev_priv)) { 13637 drm_dbg_kms(&dev_priv->drm, 13638 "initial modeset and fastboot not set\n"); 13639 ret = false; 13640 } 13641 13642 #define PIPE_CONF_CHECK_X(name) do { \ 13643 if (current_config->name != pipe_config->name) { \ 13644 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13645 "(expected 0x%08x, found 0x%08x)", \ 13646 current_config->name, \ 13647 pipe_config->name); \ 13648 ret = false; \ 13649 } \ 13650 } while (0) 13651 13652 #define PIPE_CONF_CHECK_I(name) do { \ 13653 if (current_config->name != pipe_config->name) { \ 13654 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13655 "(expected %i, found %i)", \ 13656 current_config->name, \ 13657 pipe_config->name); \ 13658 ret = false; \ 13659 } \ 13660 } while (0) 13661 13662 #define PIPE_CONF_CHECK_BOOL(name) do { \ 13663 if (current_config->name != pipe_config->name) { \ 13664 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13665 "(expected %s, found %s)", \ 13666 yesno(current_config->name), \ 13667 yesno(pipe_config->name)); \ 13668 ret = false; \ 13669 } \ 13670 } while (0) 13671 13672 /* 13673 * Checks state where we only read out the enabling, but not the entire 13674 * state itself (like full infoframes or ELD for audio). These states 13675 * require a full modeset on bootup to fix up. 
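 * has_audio is the canonical user of this below: readout can tell that audio
 * is enabled, but not recover the ELD contents, so an inherited state that
 * already has it enabled cannot be verified exactly and forces a modeset.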
13676 */
13677 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
13678 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
13679 PIPE_CONF_CHECK_BOOL(name); \
13680 } else { \
13681 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13682 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
13683 yesno(current_config->name), \
13684 yesno(pipe_config->name)); \
13685 ret = false; \
13686 } \
13687 } while (0)
13688
13689 #define PIPE_CONF_CHECK_P(name) do { \
13690 if (current_config->name != pipe_config->name) { \
13691 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13692 "(expected %p, found %p)", \
13693 current_config->name, \
13694 pipe_config->name); \
13695 ret = false; \
13696 } \
13697 } while (0)
13698
13699 #define PIPE_CONF_CHECK_M_N(name) do { \
13700 if (!intel_compare_link_m_n(&current_config->name, \
13701 &pipe_config->name, \
13702 !fastset)) { \
13703 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13704 "(expected tu %i gmch %i/%i link %i/%i, " \
13705 "found tu %i, gmch %i/%i link %i/%i)", \
13706 current_config->name.tu, \
13707 current_config->name.gmch_m, \
13708 current_config->name.gmch_n, \
13709 current_config->name.link_m, \
13710 current_config->name.link_n, \
13711 pipe_config->name.tu, \
13712 pipe_config->name.gmch_m, \
13713 pipe_config->name.gmch_n, \
13714 pipe_config->name.link_m, \
13715 pipe_config->name.link_n); \
13716 ret = false; \
13717 } \
13718 } while (0)
13719
13720 /* This is required for BDW+ where there is only one set of registers for
13721 * switching between high and low RR.
13722 * This macro can be used whenever a comparison has to be made between one
13723 * hw state and multiple sw state variables.
13724 */
13725 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
13726 if (!intel_compare_link_m_n(&current_config->name, \
13727 &pipe_config->name, !fastset) && \
13728 !intel_compare_link_m_n(&current_config->alt_name, \
13729 &pipe_config->name, !fastset)) { \
13730 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13731 "(expected tu %i gmch %i/%i link %i/%i, " \
13732 "or tu %i gmch %i/%i link %i/%i, " \
13733 "found tu %i, gmch %i/%i link %i/%i)", \
13734 current_config->name.tu, \
13735 current_config->name.gmch_m, \
13736 current_config->name.gmch_n, \
13737 current_config->name.link_m, \
13738 current_config->name.link_n, \
13739 current_config->alt_name.tu, \
13740 current_config->alt_name.gmch_m, \
13741 current_config->alt_name.gmch_n, \
13742 current_config->alt_name.link_m, \
13743 current_config->alt_name.link_n, \
13744 pipe_config->name.tu, \
13745 pipe_config->name.gmch_m, \
13746 pipe_config->name.gmch_n, \
13747 pipe_config->name.link_m, \
13748 pipe_config->name.link_n); \
13749 ret = false; \
13750 } \
13751 } while (0)
13752
13753 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13754 if ((current_config->name ^ pipe_config->name) & (mask)) { \
13755 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13756 "(%x) (expected %i, found %i)", \
13757 (mask), \
13758 current_config->name & (mask), \
13759 pipe_config->name & (mask)); \
13760 ret = false; \
13761 } \
13762 } while (0)
13763
13764 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13765 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13766 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13767 "(expected %i, found %i)", \
13768 current_config->name, \
13769 pipe_config->name); \
13770 ret = false; \
13771 } \
13772 } while (0)
13773
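/*
 * Aside (illustrative sketch, not part of the driver): the PIPE_CONF_CHECK_*
 * macros above use the do { ... } while (0) idiom so that each check expands
 * to a single statement and stays safe inside unbraced if/else. A stripped
 * down standalone analogue, with hypothetical state pointers a and b and a
 * local bool ok, would look like this:
 */
#if 0
#define SKETCH_CHECK_FIELD(a, b, field) do { \
	if ((a)->field != (b)->field) { \
		pr_debug("mismatch in %s\n", __stringify(field)); \
		ok = false; \
	} \
} while (0)
#endif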
13774 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13775 if (!intel_compare_infoframe(&current_config->infoframes.name, \
13776 &pipe_config->infoframes.name)) { \
13777 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13778 &current_config->infoframes.name, \
13779 &pipe_config->infoframes.name); \
13780 ret = false; \
13781 } \
13782 } while (0)
13783
13784 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
13785 if (!current_config->has_psr && !pipe_config->has_psr && \
13786 !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
13787 &pipe_config->infoframes.name)) { \
13788 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
13789 &current_config->infoframes.name, \
13790 &pipe_config->infoframes.name); \
13791 ret = false; \
13792 } \
13793 } while (0)
13794
13795 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13796 if (current_config->name1 != pipe_config->name1) { \
13797 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13798 "(expected %i, found %i, won't compare lut values)", \
13799 current_config->name1, \
13800 pipe_config->name1); \
13801 ret = false; \
13802 } else { \
13803 if (!intel_color_lut_equal(current_config->name2, \
13804 pipe_config->name2, pipe_config->name1, \
13805 bit_precision)) { \
13806 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13807 "hw_state doesn't match sw_state"); \
13808 ret = false; \
13809 } \
13810 } \
13811 } while (0)
13812
13813 #define PIPE_CONF_QUIRK(quirk) \
13814 ((current_config->quirks | pipe_config->quirks) & (quirk))
13815
13816 PIPE_CONF_CHECK_I(cpu_transcoder);
13817
13818 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13819 PIPE_CONF_CHECK_I(fdi_lanes);
13820 PIPE_CONF_CHECK_M_N(fdi_m_n);
13821
13822 PIPE_CONF_CHECK_I(lane_count);
13823 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13824
13825 if (INTEL_GEN(dev_priv) < 8) {
13826 PIPE_CONF_CHECK_M_N(dp_m_n);
13827
13828 if (current_config->has_drrs)
13829 PIPE_CONF_CHECK_M_N(dp_m2_n2);
13830 } else
13831 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13832
13833 PIPE_CONF_CHECK_X(output_types);
13834
13835 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
13836 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
13837 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
13838 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
13839 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
13840 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
13841
13842 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
13843 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
13844 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
13845 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
13846 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
13847 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
13848
13849 PIPE_CONF_CHECK_I(pixel_multiplier);
13850 PIPE_CONF_CHECK_I(output_format);
13851 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13852 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13853 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13854 PIPE_CONF_CHECK_BOOL(limited_color_range);
13855
13856 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13857 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13858 PIPE_CONF_CHECK_BOOL(has_infoframe);
13859 PIPE_CONF_CHECK_BOOL(fec_enable);
13860
13861 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13862
13863 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13864 DRM_MODE_FLAG_INTERLACE);
13865
13866 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13867
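		/*
		 * Note: PIPE_CONF_CHECK_FLAGS() XORs the two flag words and
		 * masks the result, so each sync polarity flag below is
		 * compared independently of the interlace flag checked above.
		 */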
PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13868 DRM_MODE_FLAG_PHSYNC); 13869 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13870 DRM_MODE_FLAG_NHSYNC); 13871 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13872 DRM_MODE_FLAG_PVSYNC); 13873 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13874 DRM_MODE_FLAG_NVSYNC); 13875 } 13876 13877 PIPE_CONF_CHECK_X(gmch_pfit.control); 13878 /* pfit ratios are autocomputed by the hw on gen4+ */ 13879 if (INTEL_GEN(dev_priv) < 4) 13880 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 13881 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 13882 13883 /* 13884 * Changing the EDP transcoder input mux 13885 * (A_ONOFF vs. A_ON) requires a full modeset. 13886 */ 13887 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 13888 13889 if (!fastset) { 13890 PIPE_CONF_CHECK_I(pipe_src_w); 13891 PIPE_CONF_CHECK_I(pipe_src_h); 13892 13893 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 13894 if (current_config->pch_pfit.enabled) { 13895 PIPE_CONF_CHECK_I(pch_pfit.dst.x1); 13896 PIPE_CONF_CHECK_I(pch_pfit.dst.y1); 13897 PIPE_CONF_CHECK_I(pch_pfit.dst.x2); 13898 PIPE_CONF_CHECK_I(pch_pfit.dst.y2); 13899 } 13900 13901 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 13902 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 13903 13904 PIPE_CONF_CHECK_X(gamma_mode); 13905 if (IS_CHERRYVIEW(dev_priv)) 13906 PIPE_CONF_CHECK_X(cgm_mode); 13907 else 13908 PIPE_CONF_CHECK_X(csc_mode); 13909 PIPE_CONF_CHECK_BOOL(gamma_enable); 13910 PIPE_CONF_CHECK_BOOL(csc_enable); 13911 13912 PIPE_CONF_CHECK_I(linetime); 13913 PIPE_CONF_CHECK_I(ips_linetime); 13914 13915 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); 13916 if (bp_gamma) 13917 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma); 13918 } 13919 13920 PIPE_CONF_CHECK_BOOL(double_wide); 13921 13922 PIPE_CONF_CHECK_P(shared_dpll); 13923 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 13924 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 13925 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 13926 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 13927 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 13928 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 13929 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 13930 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 13931 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 13932 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 13933 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 13934 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 13935 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 13936 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 13937 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 13938 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 13939 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 13940 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 13941 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 13942 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 13943 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 13944 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 13945 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 13946 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 13947 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 13948 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 13949 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 13950 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 13951 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 13952 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 13953 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 13954 13955 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 13956 PIPE_CONF_CHECK_X(dsi_pll.div); 13957 13958 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 13959 PIPE_CONF_CHECK_I(pipe_bpp); 13960 13961 
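	/*
	 * Worked example for the fuzzy clock checks below (illustrative
	 * numbers): clock1 = 148500 and clock2 = 154000 give diff = 5500, and
	 * (5500 + 302500) * 100 / 302500 == 101 < 105, so the two clocks are
	 * treated as equal; the tolerance is roughly 5% of clock1 + clock2.
	 */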
PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); 13962 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 13963 13964 PIPE_CONF_CHECK_I(min_voltage_level); 13965 13966 PIPE_CONF_CHECK_X(infoframes.enable); 13967 PIPE_CONF_CHECK_X(infoframes.gcp); 13968 PIPE_CONF_CHECK_INFOFRAME(avi); 13969 PIPE_CONF_CHECK_INFOFRAME(spd); 13970 PIPE_CONF_CHECK_INFOFRAME(hdmi); 13971 PIPE_CONF_CHECK_INFOFRAME(drm); 13972 PIPE_CONF_CHECK_DP_VSC_SDP(vsc); 13973 13974 PIPE_CONF_CHECK_X(sync_mode_slaves_mask); 13975 PIPE_CONF_CHECK_I(master_transcoder); 13976 13977 PIPE_CONF_CHECK_I(dsc.compression_enable); 13978 PIPE_CONF_CHECK_I(dsc.dsc_split); 13979 PIPE_CONF_CHECK_I(dsc.compressed_bpp); 13980 13981 PIPE_CONF_CHECK_I(mst_master_transcoder); 13982 13983 #undef PIPE_CONF_CHECK_X 13984 #undef PIPE_CONF_CHECK_I 13985 #undef PIPE_CONF_CHECK_BOOL 13986 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 13987 #undef PIPE_CONF_CHECK_P 13988 #undef PIPE_CONF_CHECK_FLAGS 13989 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 13990 #undef PIPE_CONF_CHECK_COLOR_LUT 13991 #undef PIPE_CONF_QUIRK 13992 13993 return ret; 13994 } 13995 13996 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 13997 const struct intel_crtc_state *pipe_config) 13998 { 13999 if (pipe_config->has_pch_encoder) { 14000 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 14001 &pipe_config->fdi_m_n); 14002 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock; 14003 14004 /* 14005 * FDI already provided one idea for the dotclock. 14006 * Yell if the encoder disagrees. 14007 */ 14008 drm_WARN(&dev_priv->drm, 14009 !intel_fuzzy_clock_check(fdi_dotclock, dotclock), 14010 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 14011 fdi_dotclock, dotclock); 14012 } 14013 } 14014 14015 static void verify_wm_state(struct intel_crtc *crtc, 14016 struct intel_crtc_state *new_crtc_state) 14017 { 14018 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14019 struct skl_hw_state { 14020 struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; 14021 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; 14022 struct skl_pipe_wm wm; 14023 } *hw; 14024 struct skl_pipe_wm *sw_wm; 14025 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 14026 u8 hw_enabled_slices; 14027 const enum pipe pipe = crtc->pipe; 14028 int plane, level, max_level = ilk_wm_max_level(dev_priv); 14029 14030 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active) 14031 return; 14032 14033 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 14034 if (!hw) 14035 return; 14036 14037 skl_pipe_wm_get_hw_state(crtc, &hw->wm); 14038 sw_wm = &new_crtc_state->wm.skl.optimal; 14039 14040 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); 14041 14042 hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv); 14043 14044 if (INTEL_GEN(dev_priv) >= 11 && 14045 hw_enabled_slices != dev_priv->dbuf.enabled_slices) 14046 drm_err(&dev_priv->drm, 14047 "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", 14048 dev_priv->dbuf.enabled_slices, 14049 hw_enabled_slices); 14050 14051 /* planes */ 14052 for_each_universal_plane(dev_priv, pipe, plane) { 14053 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 14054 14055 hw_plane_wm = &hw->wm.planes[plane]; 14056 sw_plane_wm = &sw_wm->planes[plane]; 14057 14058 /* Watermarks */ 14059 for (level = 0; level <= max_level; level++) { 14060 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 14061 &sw_plane_wm->wm[level]) || 14062 (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level], 14063 &sw_plane_wm->sagv_wm0))) 14064 continue; 14065 
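			/*
			 * Neither the regular watermark nor (for level 0) the
			 * SAGV watermark matched; report sw vs. hw values.
			 */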
14066 drm_err(&dev_priv->drm,
14067 "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
14068 pipe_name(pipe), plane + 1, level,
14069 sw_plane_wm->wm[level].plane_en,
14070 sw_plane_wm->wm[level].plane_res_b,
14071 sw_plane_wm->wm[level].plane_res_l,
14072 hw_plane_wm->wm[level].plane_en,
14073 hw_plane_wm->wm[level].plane_res_b,
14074 hw_plane_wm->wm[level].plane_res_l);
14075 }
14076
14077 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
14078 &sw_plane_wm->trans_wm)) {
14079 drm_err(&dev_priv->drm,
14080 "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
14081 pipe_name(pipe), plane + 1,
14082 sw_plane_wm->trans_wm.plane_en,
14083 sw_plane_wm->trans_wm.plane_res_b,
14084 sw_plane_wm->trans_wm.plane_res_l,
14085 hw_plane_wm->trans_wm.plane_en,
14086 hw_plane_wm->trans_wm.plane_res_b,
14087 hw_plane_wm->trans_wm.plane_res_l);
14088 }
14089
14090 /* DDB */
14091 hw_ddb_entry = &hw->ddb_y[plane];
14092 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
14093
14094 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
14095 drm_err(&dev_priv->drm,
14096 "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
14097 pipe_name(pipe), plane + 1,
14098 sw_ddb_entry->start, sw_ddb_entry->end,
14099 hw_ddb_entry->start, hw_ddb_entry->end);
14100 }
14101 }
14102
14103 /*
14104 * cursor
14105 * If the cursor plane isn't active, we may not have updated its ddb
14106 * allocation. In that case, since the ddb allocation will be updated
14107 * once the plane becomes visible, we can skip this check.
14108 */
14109 if (1) {
14110 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
14111
14112 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
14113 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
14114
14115 /* Watermarks */
14116 for (level = 0; level <= max_level; level++) {
14117 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
14118 &sw_plane_wm->wm[level]) ||
14119 (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
14120 &sw_plane_wm->sagv_wm0)))
14121 continue;
14122
14123 drm_err(&dev_priv->drm,
14124 "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
14125 pipe_name(pipe), level,
14126 sw_plane_wm->wm[level].plane_en,
14127 sw_plane_wm->wm[level].plane_res_b,
14128 sw_plane_wm->wm[level].plane_res_l,
14129 hw_plane_wm->wm[level].plane_en,
14130 hw_plane_wm->wm[level].plane_res_b,
14131 hw_plane_wm->wm[level].plane_res_l);
14132 }
14133
14134 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
14135 &sw_plane_wm->trans_wm)) {
14136 drm_err(&dev_priv->drm,
14137 "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
14138 pipe_name(pipe),
14139 sw_plane_wm->trans_wm.plane_en,
14140 sw_plane_wm->trans_wm.plane_res_b,
14141 sw_plane_wm->trans_wm.plane_res_l,
14142 hw_plane_wm->trans_wm.plane_en,
14143 hw_plane_wm->trans_wm.plane_res_b,
14144 hw_plane_wm->trans_wm.plane_res_l);
14145 }
14146
14147 /* DDB */
14148 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
14149 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
14150
14151 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
14152 drm_err(&dev_priv->drm,
14153 "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
14154 pipe_name(pipe),
14155 sw_ddb_entry->start, sw_ddb_entry->end,
14156 hw_ddb_entry->start, hw_ddb_entry->end);
14157 }
14158 }
14159
14160 kfree(hw);
14161 }
14162
14163 static void
14164 verify_connector_state(struct
intel_atomic_state *state, 14165 struct intel_crtc *crtc) 14166 { 14167 struct drm_connector *connector; 14168 struct drm_connector_state *new_conn_state; 14169 int i; 14170 14171 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { 14172 struct drm_encoder *encoder = connector->encoder; 14173 struct intel_crtc_state *crtc_state = NULL; 14174 14175 if (new_conn_state->crtc != &crtc->base) 14176 continue; 14177 14178 if (crtc) 14179 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 14180 14181 intel_connector_verify_state(crtc_state, new_conn_state); 14182 14183 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 14184 "connector's atomic encoder doesn't match legacy encoder\n"); 14185 } 14186 } 14187 14188 static void 14189 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) 14190 { 14191 struct intel_encoder *encoder; 14192 struct drm_connector *connector; 14193 struct drm_connector_state *old_conn_state, *new_conn_state; 14194 int i; 14195 14196 for_each_intel_encoder(&dev_priv->drm, encoder) { 14197 bool enabled = false, found = false; 14198 enum pipe pipe; 14199 14200 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n", 14201 encoder->base.base.id, 14202 encoder->base.name); 14203 14204 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, 14205 new_conn_state, i) { 14206 if (old_conn_state->best_encoder == &encoder->base) 14207 found = true; 14208 14209 if (new_conn_state->best_encoder != &encoder->base) 14210 continue; 14211 found = enabled = true; 14212 14213 I915_STATE_WARN(new_conn_state->crtc != 14214 encoder->base.crtc, 14215 "connector's crtc doesn't match encoder crtc\n"); 14216 } 14217 14218 if (!found) 14219 continue; 14220 14221 I915_STATE_WARN(!!encoder->base.crtc != enabled, 14222 "encoder's enabled state mismatch " 14223 "(expected %i, found %i)\n", 14224 !!encoder->base.crtc, enabled); 14225 14226 if (!encoder->base.crtc) { 14227 bool active; 14228 14229 active = encoder->get_hw_state(encoder, &pipe); 14230 I915_STATE_WARN(active, 14231 "encoder detached but still enabled on pipe %c.\n", 14232 pipe_name(pipe)); 14233 } 14234 } 14235 } 14236 14237 static void 14238 verify_crtc_state(struct intel_crtc *crtc, 14239 struct intel_crtc_state *old_crtc_state, 14240 struct intel_crtc_state *new_crtc_state) 14241 { 14242 struct drm_device *dev = crtc->base.dev; 14243 struct drm_i915_private *dev_priv = to_i915(dev); 14244 struct intel_encoder *encoder; 14245 struct intel_crtc_state *pipe_config = old_crtc_state; 14246 struct drm_atomic_state *state = old_crtc_state->uapi.state; 14247 bool active; 14248 14249 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); 14250 intel_crtc_free_hw_state(old_crtc_state); 14251 intel_crtc_state_reset(old_crtc_state, crtc); 14252 old_crtc_state->uapi.state = state; 14253 14254 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, 14255 crtc->base.name); 14256 14257 active = dev_priv->display.get_pipe_config(crtc, pipe_config); 14258 14259 /* we keep both pipes enabled on 830 */ 14260 if (IS_I830(dev_priv)) 14261 active = new_crtc_state->hw.active; 14262 14263 I915_STATE_WARN(new_crtc_state->hw.active != active, 14264 "crtc active state doesn't match with hw state " 14265 "(expected %i, found %i)\n", 14266 new_crtc_state->hw.active, active); 14267 14268 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active, 14269 "transitional active state does not match atomic hw state " 14270 "(expected %i, found %i)\n", 14271 
new_crtc_state->hw.active, crtc->active); 14272 14273 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 14274 enum pipe pipe; 14275 14276 active = encoder->get_hw_state(encoder, &pipe); 14277 I915_STATE_WARN(active != new_crtc_state->hw.active, 14278 "[ENCODER:%i] active %i with crtc active %i\n", 14279 encoder->base.base.id, active, 14280 new_crtc_state->hw.active); 14281 14282 I915_STATE_WARN(active && crtc->pipe != pipe, 14283 "Encoder connected to wrong pipe %c\n", 14284 pipe_name(pipe)); 14285 14286 if (active) 14287 encoder->get_config(encoder, pipe_config); 14288 } 14289 14290 intel_crtc_compute_pixel_rate(pipe_config); 14291 14292 if (!new_crtc_state->hw.active) 14293 return; 14294 14295 intel_pipe_config_sanity_check(dev_priv, pipe_config); 14296 14297 if (!intel_pipe_config_compare(new_crtc_state, 14298 pipe_config, false)) { 14299 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 14300 intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); 14301 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]"); 14302 } 14303 } 14304 14305 static void 14306 intel_verify_planes(struct intel_atomic_state *state) 14307 { 14308 struct intel_plane *plane; 14309 const struct intel_plane_state *plane_state; 14310 int i; 14311 14312 for_each_new_intel_plane_in_state(state, plane, 14313 plane_state, i) 14314 assert_plane(plane, plane_state->planar_slave || 14315 plane_state->uapi.visible); 14316 } 14317 14318 static void 14319 verify_single_dpll_state(struct drm_i915_private *dev_priv, 14320 struct intel_shared_dpll *pll, 14321 struct intel_crtc *crtc, 14322 struct intel_crtc_state *new_crtc_state) 14323 { 14324 struct intel_dpll_hw_state dpll_hw_state; 14325 unsigned int crtc_mask; 14326 bool active; 14327 14328 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 14329 14330 drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name); 14331 14332 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state); 14333 14334 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { 14335 I915_STATE_WARN(!pll->on && pll->active_mask, 14336 "pll in active use but not on in sw tracking\n"); 14337 I915_STATE_WARN(pll->on && !pll->active_mask, 14338 "pll is on but not used by any active crtc\n"); 14339 I915_STATE_WARN(pll->on != active, 14340 "pll on state mismatch (expected %i, found %i)\n", 14341 pll->on, active); 14342 } 14343 14344 if (!crtc) { 14345 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, 14346 "more active pll users than references: %x vs %x\n", 14347 pll->active_mask, pll->state.crtc_mask); 14348 14349 return; 14350 } 14351 14352 crtc_mask = drm_crtc_mask(&crtc->base); 14353 14354 if (new_crtc_state->hw.active) 14355 I915_STATE_WARN(!(pll->active_mask & crtc_mask), 14356 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", 14357 pipe_name(crtc->pipe), pll->active_mask); 14358 else 14359 I915_STATE_WARN(pll->active_mask & crtc_mask, 14360 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", 14361 pipe_name(crtc->pipe), pll->active_mask); 14362 14363 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), 14364 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", 14365 crtc_mask, pll->state.crtc_mask); 14366 14367 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 14368 &dpll_hw_state, 14369 sizeof(dpll_hw_state)), 14370 "pll hw state mismatch\n"); 14371 } 14372 14373 static void 14374 verify_shared_dpll_state(struct intel_crtc *crtc, 14375 struct intel_crtc_state *old_crtc_state, 14376 struct intel_crtc_state *new_crtc_state) 14377 { 14378 
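	/*
	 * Illustrative note (not from bspec): drm_crtc_mask() is simply
	 * BIT(drm_crtc_index(crtc)), so on a typical setup pipe B maps to
	 * 0x2 and the pll->active_mask / pll->state.crtc_mask checks below
	 * reduce to single-bit tests against that mask.
	 */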
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask)\n",
				pipe_name(crtc->pipe));
		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
				"pll enabled crtcs mismatch (found %c in enabled mask)\n",
				pipe_name(crtc->pipe));
	}
}

static void
intel_modeset_verify_crtc(struct intel_crtc *crtc,
			  struct intel_atomic_state *state,
			  struct intel_crtc_state *old_crtc_state,
			  struct intel_crtc_state *new_crtc_state)
{
	if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
		return;

	verify_wm_state(crtc, new_crtc_state);
	verify_connector_state(state, crtc);
	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
}

static void
verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
		verify_single_dpll_state(dev_priv,
					 &dev_priv->dpll.shared_dplls[i],
					 NULL, NULL);
}

static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}

static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		/* i.e. -1 modulo vtotal, keeping the offset positive */
		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}

static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		intel_release_shared_dplls(state, crtc);
	}
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtcs that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtcs are already enabled.
*/ 14540 for_each_intel_crtc(state->base.dev, crtc) { 14541 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 14542 if (IS_ERR(crtc_state)) 14543 return PTR_ERR(crtc_state); 14544 14545 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 14546 14547 if (!crtc_state->hw.active || 14548 needs_modeset(crtc_state)) 14549 continue; 14550 14551 /* 2 or more enabled crtcs means no need for w/a */ 14552 if (enabled_pipe != INVALID_PIPE) 14553 return 0; 14554 14555 enabled_pipe = crtc->pipe; 14556 } 14557 14558 if (enabled_pipe != INVALID_PIPE) 14559 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 14560 else if (other_crtc_state) 14561 other_crtc_state->hsw_workaround_pipe = first_pipe; 14562 14563 return 0; 14564 } 14565 14566 u8 intel_calc_active_pipes(struct intel_atomic_state *state, 14567 u8 active_pipes) 14568 { 14569 const struct intel_crtc_state *crtc_state; 14570 struct intel_crtc *crtc; 14571 int i; 14572 14573 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 14574 if (crtc_state->hw.active) 14575 active_pipes |= BIT(crtc->pipe); 14576 else 14577 active_pipes &= ~BIT(crtc->pipe); 14578 } 14579 14580 return active_pipes; 14581 } 14582 14583 static int intel_modeset_checks(struct intel_atomic_state *state) 14584 { 14585 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14586 int ret; 14587 14588 state->modeset = true; 14589 state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes); 14590 14591 if (state->active_pipes != dev_priv->active_pipes) { 14592 ret = _intel_atomic_lock_global_state(state); 14593 if (ret) 14594 return ret; 14595 } 14596 14597 if (IS_HASWELL(dev_priv)) 14598 return hsw_mode_set_planes_workaround(state); 14599 14600 return 0; 14601 } 14602 14603 /* 14604 * Handle calculation of various watermark data at the end of the atomic check 14605 * phase. The code here should be run after the per-crtc and per-plane 'check' 14606 * handlers to ensure that all derived state has been updated. 14607 */ 14608 static int calc_watermark_data(struct intel_atomic_state *state) 14609 { 14610 struct drm_device *dev = state->base.dev; 14611 struct drm_i915_private *dev_priv = to_i915(dev); 14612 14613 /* Is there platform-specific watermark information to calculate? */ 14614 if (dev_priv->display.compute_global_watermarks) 14615 return dev_priv->display.compute_global_watermarks(state); 14616 14617 return 0; 14618 } 14619 14620 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 14621 struct intel_crtc_state *new_crtc_state) 14622 { 14623 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 14624 return; 14625 14626 new_crtc_state->uapi.mode_changed = false; 14627 new_crtc_state->update_pipe = true; 14628 } 14629 14630 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state, 14631 struct intel_crtc_state *new_crtc_state) 14632 { 14633 /* 14634 * If we're not doing the full modeset we want to 14635 * keep the current M/N values as they may be 14636 * sufficiently different to the computed values 14637 * to cause problems. 
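	 * (For instance, the link M/N values inherited from the BIOS can
	 * differ from our freshly computed ones purely due to rounding,
	 * even though the mode is otherwise identical.)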
14638 * 14639 * FIXME: should really copy more fuzzy state here 14640 */ 14641 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; 14642 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; 14643 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; 14644 new_crtc_state->has_drrs = old_crtc_state->has_drrs; 14645 } 14646 14647 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 14648 struct intel_crtc *crtc, 14649 u8 plane_ids_mask) 14650 { 14651 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14652 struct intel_plane *plane; 14653 14654 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 14655 struct intel_plane_state *plane_state; 14656 14657 if ((plane_ids_mask & BIT(plane->id)) == 0) 14658 continue; 14659 14660 plane_state = intel_atomic_get_plane_state(state, plane); 14661 if (IS_ERR(plane_state)) 14662 return PTR_ERR(plane_state); 14663 } 14664 14665 return 0; 14666 } 14667 14668 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 14669 { 14670 /* See {hsw,vlv,ivb}_plane_ratio() */ 14671 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 14672 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 14673 IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11); 14674 } 14675 14676 static int intel_atomic_check_planes(struct intel_atomic_state *state) 14677 { 14678 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14679 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 14680 struct intel_plane_state *plane_state; 14681 struct intel_plane *plane; 14682 struct intel_crtc *crtc; 14683 int i, ret; 14684 14685 ret = icl_add_linked_planes(state); 14686 if (ret) 14687 return ret; 14688 14689 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 14690 ret = intel_plane_atomic_check(state, plane); 14691 if (ret) { 14692 drm_dbg_atomic(&dev_priv->drm, 14693 "[PLANE:%d:%s] atomic driver check failed\n", 14694 plane->base.base.id, plane->base.name); 14695 return ret; 14696 } 14697 } 14698 14699 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14700 new_crtc_state, i) { 14701 u8 old_active_planes, new_active_planes; 14702 14703 ret = icl_check_nv12_planes(new_crtc_state); 14704 if (ret) 14705 return ret; 14706 14707 /* 14708 * On some platforms the number of active planes affects 14709 * the planes' minimum cdclk calculation. Add such planes 14710 * to the state before we compute the minimum cdclk. 
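		 * (See active_planes_affects_min_cdclk() above for the
		 * platforms where this applies.)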
14711 */ 14712 if (!active_planes_affects_min_cdclk(dev_priv)) 14713 continue; 14714 14715 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 14716 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 14717 14718 /* 14719 * Not only the number of planes, but if the plane configuration had 14720 * changed might already mean we need to recompute min CDCLK, 14721 * because different planes might consume different amount of Dbuf bandwidth 14722 * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor 14723 */ 14724 if (old_active_planes == new_active_planes) 14725 continue; 14726 14727 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 14728 if (ret) 14729 return ret; 14730 } 14731 14732 return 0; 14733 } 14734 14735 static int intel_atomic_check_cdclk(struct intel_atomic_state *state, 14736 bool *need_cdclk_calc) 14737 { 14738 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14739 struct intel_cdclk_state *new_cdclk_state; 14740 struct intel_plane_state *plane_state; 14741 struct intel_bw_state *new_bw_state; 14742 struct intel_plane *plane; 14743 int min_cdclk = 0; 14744 enum pipe pipe; 14745 int ret; 14746 int i; 14747 /* 14748 * active_planes bitmask has been updated, and potentially 14749 * affected planes are part of the state. We can now 14750 * compute the minimum cdclk for each plane. 14751 */ 14752 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 14753 ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc); 14754 if (ret) 14755 return ret; 14756 } 14757 14758 new_cdclk_state = intel_atomic_get_new_cdclk_state(state); 14759 14760 if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed) 14761 *need_cdclk_calc = true; 14762 14763 ret = dev_priv->display.bw_calc_min_cdclk(state); 14764 if (ret) 14765 return ret; 14766 14767 new_bw_state = intel_atomic_get_new_bw_state(state); 14768 14769 if (!new_cdclk_state || !new_bw_state) 14770 return 0; 14771 14772 for_each_pipe(dev_priv, pipe) { 14773 min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk); 14774 14775 /* 14776 * Currently do this change only if we need to increase 14777 */ 14778 if (new_bw_state->min_cdclk > min_cdclk) 14779 *need_cdclk_calc = true; 14780 } 14781 14782 return 0; 14783 } 14784 14785 static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 14786 { 14787 struct intel_crtc_state *crtc_state; 14788 struct intel_crtc *crtc; 14789 int i; 14790 14791 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 14792 int ret = intel_crtc_atomic_check(state, crtc); 14793 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 14794 if (ret) { 14795 drm_dbg_atomic(&i915->drm, 14796 "[CRTC:%d:%s] atomic driver check failed\n", 14797 crtc->base.base.id, crtc->base.name); 14798 return ret; 14799 } 14800 } 14801 14802 return 0; 14803 } 14804 14805 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, 14806 u8 transcoders) 14807 { 14808 const struct intel_crtc_state *new_crtc_state; 14809 struct intel_crtc *crtc; 14810 int i; 14811 14812 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14813 if (new_crtc_state->hw.enable && 14814 transcoders & BIT(new_crtc_state->cpu_transcoder) && 14815 needs_modeset(new_crtc_state)) 14816 return true; 14817 } 14818 14819 return false; 14820 } 14821 14822 /** 14823 * intel_atomic_check - validate state object 14824 * @dev: drm device 14825 * @_state: state to validate 14826 */ 14827 static int 
intel_atomic_check(struct drm_device *dev,
		   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;
	bool any_ms = false;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->inherited != old_crtc_state->inherited)
			new_crtc_state->uapi.mode_changed = true;
	}

	ret = drm_atomic_helper_check_modeset(dev, &state->base);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state)) {
			/* Light copy */
			intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);

			continue;
		}

		ret = intel_crtc_prepare_cleared_state(new_crtc_state);
		if (ret)
			goto fail;

		if (!new_crtc_state->hw.enable)
			continue;

		ret = intel_modeset_pipe_config(new_crtc_state);
		if (ret)
			goto fail;
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		ret = intel_modeset_pipe_config_late(new_crtc_state);
		if (ret)
			goto fail;

		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
	}

	/*
	 * Check if fastset is allowed by external dependencies like other
	 * pipes and transcoders.
	 *
	 * Right now it only forces a full modeset when the MST master
	 * transcoder did not change but the pipe of the master transcoder
	 * needs a full modeset; in that case all slaves need a full modeset
	 * too. Likewise for port synced crtcs: if one of the synced crtcs
	 * needs a full modeset, all the other synced crtcs must be forced
	 * to a full modeset as well.
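	 *
	 * (Illustrative example: if crtc B is an MST slave whose master
	 * transcoder belongs to crtc A, and crtc A needs a full modeset,
	 * the first check below demotes crtc B's fastset to a full modeset
	 * by setting uapi.mode_changed and clearing update_pipe.)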
14890 */ 14891 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14892 if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state)) 14893 continue; 14894 14895 if (intel_dp_mst_is_slave_trans(new_crtc_state)) { 14896 enum transcoder master = new_crtc_state->mst_master_transcoder; 14897 14898 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) { 14899 new_crtc_state->uapi.mode_changed = true; 14900 new_crtc_state->update_pipe = false; 14901 } 14902 } 14903 14904 if (is_trans_port_sync_mode(new_crtc_state)) { 14905 u8 trans = new_crtc_state->sync_mode_slaves_mask; 14906 14907 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) 14908 trans |= BIT(new_crtc_state->master_transcoder); 14909 14910 if (intel_cpu_transcoders_need_modeset(state, trans)) { 14911 new_crtc_state->uapi.mode_changed = true; 14912 new_crtc_state->update_pipe = false; 14913 } 14914 } 14915 } 14916 14917 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14918 new_crtc_state, i) { 14919 if (needs_modeset(new_crtc_state)) { 14920 any_ms = true; 14921 continue; 14922 } 14923 14924 if (!new_crtc_state->update_pipe) 14925 continue; 14926 14927 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state); 14928 } 14929 14930 if (any_ms && !check_digital_port_conflicts(state)) { 14931 drm_dbg_kms(&dev_priv->drm, 14932 "rejecting conflicting digital port configuration\n"); 14933 ret = -EINVAL; 14934 goto fail; 14935 } 14936 14937 ret = drm_dp_mst_atomic_check(&state->base); 14938 if (ret) 14939 goto fail; 14940 14941 ret = intel_atomic_check_planes(state); 14942 if (ret) 14943 goto fail; 14944 14945 /* 14946 * distrust_bios_wm will force a full dbuf recomputation 14947 * but the hardware state will only get updated accordingly 14948 * if state->modeset==true. Hence distrust_bios_wm==true && 14949 * state->modeset==false is an invalid combination which 14950 * would cause the hardware and software dbuf state to get 14951 * out of sync. We must prevent that. 14952 * 14953 * FIXME clean up this mess and introduce better 14954 * state tracking for dbuf. 14955 */ 14956 if (dev_priv->wm.distrust_bios_wm) 14957 any_ms = true; 14958 14959 intel_fbc_choose_crtc(dev_priv, state); 14960 ret = calc_watermark_data(state); 14961 if (ret) 14962 goto fail; 14963 14964 ret = intel_bw_atomic_check(state); 14965 if (ret) 14966 goto fail; 14967 14968 ret = intel_atomic_check_cdclk(state, &any_ms); 14969 if (ret) 14970 goto fail; 14971 14972 if (any_ms) { 14973 ret = intel_modeset_checks(state); 14974 if (ret) 14975 goto fail; 14976 14977 ret = intel_modeset_calc_cdclk(state); 14978 if (ret) 14979 return ret; 14980 14981 intel_modeset_clear_plls(state); 14982 } 14983 14984 ret = intel_atomic_check_crtcs(state); 14985 if (ret) 14986 goto fail; 14987 14988 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14989 new_crtc_state, i) { 14990 if (!needs_modeset(new_crtc_state) && 14991 !new_crtc_state->update_pipe) 14992 continue; 14993 14994 intel_dump_pipe_config(new_crtc_state, state, 14995 needs_modeset(new_crtc_state) ? 14996 "[modeset]" : "[fastset]"); 14997 } 14998 14999 return 0; 15000 15001 fail: 15002 if (ret == -EDEADLK) 15003 return ret; 15004 15005 /* 15006 * FIXME would probably be nice to know which crtc specifically 15007 * caused the failure, in cases where we can pinpoint it. 
15008 */ 15009 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15010 new_crtc_state, i) 15011 intel_dump_pipe_config(new_crtc_state, state, "[failed]"); 15012 15013 return ret; 15014 } 15015 15016 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 15017 { 15018 struct intel_crtc_state *crtc_state; 15019 struct intel_crtc *crtc; 15020 int i, ret; 15021 15022 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); 15023 if (ret < 0) 15024 return ret; 15025 15026 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 15027 bool mode_changed = needs_modeset(crtc_state); 15028 15029 if (mode_changed || crtc_state->update_pipe || 15030 crtc_state->uapi.color_mgmt_changed) { 15031 intel_dsb_prepare(crtc_state); 15032 } 15033 } 15034 15035 return 0; 15036 } 15037 15038 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 15039 { 15040 struct drm_device *dev = crtc->base.dev; 15041 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)]; 15042 15043 if (!vblank->max_vblank_count) 15044 return (u32)drm_crtc_accurate_vblank_count(&crtc->base); 15045 15046 return crtc->base.funcs->get_vblank_counter(&crtc->base); 15047 } 15048 15049 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 15050 struct intel_crtc_state *crtc_state) 15051 { 15052 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 15053 15054 if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes) 15055 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 15056 15057 if (crtc_state->has_pch_encoder) { 15058 enum pipe pch_transcoder = 15059 intel_crtc_pch_transcoder(crtc); 15060 15061 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 15062 } 15063 } 15064 15065 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 15066 const struct intel_crtc_state *new_crtc_state) 15067 { 15068 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 15069 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 15070 15071 /* 15072 * Update pipe size and adjust fitter if needed: the reason for this is 15073 * that in compute_mode_changes we check the native mode (not the pfit 15074 * mode) to see if we can flip rather than do a full mode set. In the 15075 * fastboot case, we'll flip, but if we don't update the pipesrc and 15076 * pfit state, we'll end up with a big fb scanned out into the wrong 15077 * sized surface. 15078 */ 15079 intel_set_pipe_src_size(new_crtc_state); 15080 15081 /* on skylake this is done by detaching scalers */ 15082 if (INTEL_GEN(dev_priv) >= 9) { 15083 skl_detach_scalers(new_crtc_state); 15084 15085 if (new_crtc_state->pch_pfit.enabled) 15086 skl_pfit_enable(new_crtc_state); 15087 } else if (HAS_PCH_SPLIT(dev_priv)) { 15088 if (new_crtc_state->pch_pfit.enabled) 15089 ilk_pfit_enable(new_crtc_state); 15090 else if (old_crtc_state->pch_pfit.enabled) 15091 ilk_pfit_disable(old_crtc_state); 15092 } 15093 15094 /* 15095 * The register is supposedly single buffered so perhaps 15096 * not 100% correct to do this here. But SKL+ calculate 15097 * this based on the adjust pixel rate so pfit changes do 15098 * affect it and so it must be updated for fastsets. 15099 * HSW/BDW only really need this here for fastboot, after 15100 * that the value should not change without a full modeset. 
15101 */ 15102 if (INTEL_GEN(dev_priv) >= 9 || 15103 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 15104 hsw_set_linetime_wm(new_crtc_state); 15105 15106 if (INTEL_GEN(dev_priv) >= 11) 15107 icl_set_pipe_chicken(crtc); 15108 } 15109 15110 static void commit_pipe_config(struct intel_atomic_state *state, 15111 struct intel_crtc *crtc) 15112 { 15113 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15114 const struct intel_crtc_state *old_crtc_state = 15115 intel_atomic_get_old_crtc_state(state, crtc); 15116 const struct intel_crtc_state *new_crtc_state = 15117 intel_atomic_get_new_crtc_state(state, crtc); 15118 bool modeset = needs_modeset(new_crtc_state); 15119 15120 /* 15121 * During modesets pipe configuration was programmed as the 15122 * CRTC was enabled. 15123 */ 15124 if (!modeset) { 15125 if (new_crtc_state->uapi.color_mgmt_changed || 15126 new_crtc_state->update_pipe) 15127 intel_color_commit(new_crtc_state); 15128 15129 if (INTEL_GEN(dev_priv) >= 9) 15130 skl_detach_scalers(new_crtc_state); 15131 15132 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 15133 bdw_set_pipemisc(new_crtc_state); 15134 15135 if (new_crtc_state->update_pipe) 15136 intel_pipe_fastset(old_crtc_state, new_crtc_state); 15137 } 15138 15139 if (dev_priv->display.atomic_update_watermarks) 15140 dev_priv->display.atomic_update_watermarks(state, crtc); 15141 } 15142 15143 static void intel_enable_crtc(struct intel_atomic_state *state, 15144 struct intel_crtc *crtc) 15145 { 15146 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15147 const struct intel_crtc_state *new_crtc_state = 15148 intel_atomic_get_new_crtc_state(state, crtc); 15149 15150 if (!needs_modeset(new_crtc_state)) 15151 return; 15152 15153 intel_crtc_update_active_timings(new_crtc_state); 15154 15155 dev_priv->display.crtc_enable(state, crtc); 15156 15157 /* vblanks work again, re-enable pipe CRC. */ 15158 intel_crtc_enable_pipe_crc(crtc); 15159 } 15160 15161 static void intel_update_crtc(struct intel_atomic_state *state, 15162 struct intel_crtc *crtc) 15163 { 15164 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15165 const struct intel_crtc_state *old_crtc_state = 15166 intel_atomic_get_old_crtc_state(state, crtc); 15167 struct intel_crtc_state *new_crtc_state = 15168 intel_atomic_get_new_crtc_state(state, crtc); 15169 bool modeset = needs_modeset(new_crtc_state); 15170 15171 if (!modeset) { 15172 if (new_crtc_state->preload_luts && 15173 (new_crtc_state->uapi.color_mgmt_changed || 15174 new_crtc_state->update_pipe)) 15175 intel_color_load_luts(new_crtc_state); 15176 15177 intel_pre_plane_update(state, crtc); 15178 15179 if (new_crtc_state->update_pipe) 15180 intel_encoders_update_pipe(state, crtc); 15181 } 15182 15183 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) 15184 intel_fbc_disable(crtc); 15185 else 15186 intel_fbc_enable(state, crtc); 15187 15188 /* Perform vblank evasion around commit operation */ 15189 intel_pipe_update_start(new_crtc_state); 15190 15191 commit_pipe_config(state, crtc); 15192 15193 if (INTEL_GEN(dev_priv) >= 9) 15194 skl_update_planes_on_crtc(state, crtc); 15195 else 15196 i9xx_update_planes_on_crtc(state, crtc); 15197 15198 intel_pipe_update_end(new_crtc_state); 15199 15200 /* 15201 * We usually enable FIFO underrun interrupts as part of the 15202 * CRTC enable sequence during modesets. But when we inherit a 15203 * valid pipe configuration from the BIOS we need to take care 15204 * of enabling them on the CRTC's first fastset. 
15205 */ 15206 if (new_crtc_state->update_pipe && !modeset && 15207 old_crtc_state->inherited) 15208 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 15209 } 15210 15211 15212 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 15213 struct intel_crtc_state *old_crtc_state, 15214 struct intel_crtc_state *new_crtc_state, 15215 struct intel_crtc *crtc) 15216 { 15217 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15218 15219 intel_crtc_disable_planes(state, crtc); 15220 15221 /* 15222 * We need to disable pipe CRC before disabling the pipe, 15223 * or we race against vblank off. 15224 */ 15225 intel_crtc_disable_pipe_crc(crtc); 15226 15227 dev_priv->display.crtc_disable(state, crtc); 15228 crtc->active = false; 15229 intel_fbc_disable(crtc); 15230 intel_disable_shared_dpll(old_crtc_state); 15231 15232 /* FIXME unify this for all platforms */ 15233 if (!new_crtc_state->hw.active && 15234 !HAS_GMCH(dev_priv) && 15235 dev_priv->display.initial_watermarks) 15236 dev_priv->display.initial_watermarks(state, crtc); 15237 } 15238 15239 static void intel_commit_modeset_disables(struct intel_atomic_state *state) 15240 { 15241 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 15242 struct intel_crtc *crtc; 15243 u32 handled = 0; 15244 int i; 15245 15246 /* Only disable port sync and MST slaves */ 15247 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15248 new_crtc_state, i) { 15249 if (!needs_modeset(new_crtc_state)) 15250 continue; 15251 15252 if (!old_crtc_state->hw.active) 15253 continue; 15254 15255 /* In case of Transcoder port Sync master slave CRTCs can be 15256 * assigned in any order and we need to make sure that 15257 * slave CRTCs are disabled first and then master CRTC since 15258 * Slave vblanks are masked till Master Vblanks. 
15259 */ 15260 if (!is_trans_port_sync_slave(old_crtc_state) && 15261 !intel_dp_mst_is_slave_trans(old_crtc_state)) 15262 continue; 15263 15264 intel_pre_plane_update(state, crtc); 15265 intel_old_crtc_state_disables(state, old_crtc_state, 15266 new_crtc_state, crtc); 15267 handled |= BIT(crtc->pipe); 15268 } 15269 15270 /* Disable everything else left on */ 15271 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15272 new_crtc_state, i) { 15273 if (!needs_modeset(new_crtc_state) || 15274 (handled & BIT(crtc->pipe))) 15275 continue; 15276 15277 intel_pre_plane_update(state, crtc); 15278 if (old_crtc_state->hw.active) 15279 intel_old_crtc_state_disables(state, old_crtc_state, 15280 new_crtc_state, crtc); 15281 } 15282 } 15283 15284 static void intel_commit_modeset_enables(struct intel_atomic_state *state) 15285 { 15286 struct intel_crtc_state *new_crtc_state; 15287 struct intel_crtc *crtc; 15288 int i; 15289 15290 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 15291 if (!new_crtc_state->hw.active) 15292 continue; 15293 15294 intel_enable_crtc(state, crtc); 15295 intel_update_crtc(state, crtc); 15296 } 15297 } 15298 15299 static void skl_commit_modeset_enables(struct intel_atomic_state *state) 15300 { 15301 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15302 struct intel_crtc *crtc; 15303 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 15304 struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 15305 u8 update_pipes = 0, modeset_pipes = 0; 15306 int i; 15307 15308 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 15309 enum pipe pipe = crtc->pipe; 15310 15311 if (!new_crtc_state->hw.active) 15312 continue; 15313 15314 /* ignore allocations for crtc's that have been turned off. */ 15315 if (!needs_modeset(new_crtc_state)) { 15316 entries[pipe] = old_crtc_state->wm.skl.ddb; 15317 update_pipes |= BIT(pipe); 15318 } else { 15319 modeset_pipes |= BIT(pipe); 15320 } 15321 } 15322 15323 /* 15324 * Whenever the number of active pipes changes, we need to make sure we 15325 * update the pipes in the right order so that their ddb allocations 15326 * never overlap with each other between CRTC updates. Otherwise we'll 15327 * cause pipe underruns and other bad stuff. 15328 * 15329 * So first lets enable all pipes that do not need a fullmodeset as 15330 * those don't have any external dependency. 15331 */ 15332 while (update_pipes) { 15333 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15334 new_crtc_state, i) { 15335 enum pipe pipe = crtc->pipe; 15336 15337 if ((update_pipes & BIT(pipe)) == 0) 15338 continue; 15339 15340 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 15341 entries, I915_MAX_PIPES, pipe)) 15342 continue; 15343 15344 entries[pipe] = new_crtc_state->wm.skl.ddb; 15345 update_pipes &= ~BIT(pipe); 15346 15347 intel_update_crtc(state, crtc); 15348 15349 /* 15350 * If this is an already active pipe, it's DDB changed, 15351 * and this isn't the last pipe that needs updating 15352 * then we need to wait for a vblank to pass for the 15353 * new ddb allocation to take effect. 
15354 */ 15355 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 15356 &old_crtc_state->wm.skl.ddb) && 15357 (update_pipes | modeset_pipes)) 15358 intel_wait_for_vblank(dev_priv, pipe); 15359 } 15360 } 15361 15362 update_pipes = modeset_pipes; 15363 15364 /* 15365 * Enable all pipes that needs a modeset and do not depends on other 15366 * pipes 15367 */ 15368 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 15369 enum pipe pipe = crtc->pipe; 15370 15371 if ((modeset_pipes & BIT(pipe)) == 0) 15372 continue; 15373 15374 if (intel_dp_mst_is_slave_trans(new_crtc_state) || 15375 is_trans_port_sync_master(new_crtc_state)) 15376 continue; 15377 15378 modeset_pipes &= ~BIT(pipe); 15379 15380 intel_enable_crtc(state, crtc); 15381 } 15382 15383 /* 15384 * Then we enable all remaining pipes that depend on other 15385 * pipes: MST slaves and port sync masters. 15386 */ 15387 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 15388 enum pipe pipe = crtc->pipe; 15389 15390 if ((modeset_pipes & BIT(pipe)) == 0) 15391 continue; 15392 15393 modeset_pipes &= ~BIT(pipe); 15394 15395 intel_enable_crtc(state, crtc); 15396 } 15397 15398 /* 15399 * Finally we do the plane updates/etc. for all pipes that got enabled. 15400 */ 15401 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 15402 enum pipe pipe = crtc->pipe; 15403 15404 if ((update_pipes & BIT(pipe)) == 0) 15405 continue; 15406 15407 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 15408 entries, I915_MAX_PIPES, pipe)); 15409 15410 entries[pipe] = new_crtc_state->wm.skl.ddb; 15411 update_pipes &= ~BIT(pipe); 15412 15413 intel_update_crtc(state, crtc); 15414 } 15415 15416 drm_WARN_ON(&dev_priv->drm, modeset_pipes); 15417 drm_WARN_ON(&dev_priv->drm, update_pipes); 15418 } 15419 15420 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 15421 { 15422 struct intel_atomic_state *state, *next; 15423 struct llist_node *freed; 15424 15425 freed = llist_del_all(&dev_priv->atomic_helper.free_list); 15426 llist_for_each_entry_safe(state, next, freed, freed) 15427 drm_atomic_state_put(&state->base); 15428 } 15429 15430 static void intel_atomic_helper_free_state_worker(struct work_struct *work) 15431 { 15432 struct drm_i915_private *dev_priv = 15433 container_of(work, typeof(*dev_priv), atomic_helper.free_work); 15434 15435 intel_atomic_helper_free_state(dev_priv); 15436 } 15437 15438 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 15439 { 15440 struct wait_queue_entry wait_fence, wait_reset; 15441 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); 15442 15443 init_wait_entry(&wait_fence, 0); 15444 init_wait_entry(&wait_reset, 0); 15445 for (;;) { 15446 prepare_to_wait(&intel_state->commit_ready.wait, 15447 &wait_fence, TASK_UNINTERRUPTIBLE); 15448 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 15449 I915_RESET_MODESET), 15450 &wait_reset, TASK_UNINTERRUPTIBLE); 15451 15452 15453 if (i915_sw_fence_done(&intel_state->commit_ready) || 15454 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) 15455 break; 15456 15457 schedule(); 15458 } 15459 finish_wait(&intel_state->commit_ready.wait, &wait_fence); 15460 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags, 15461 I915_RESET_MODESET), 15462 &wait_reset); 15463 } 15464 15465 static void intel_cleanup_dsbs(struct intel_atomic_state *state) 15466 { 15467 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 15468 struct intel_crtc *crtc; 
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dsb_cleanup(old_crtc_state);
}

static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	intel_cleanup_dsbs(state);
	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);

	intel_atomic_helper_free_state(i915);
}

static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {
			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		intel_set_cdclk_post_plane_update(state);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously_
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.active &&
		    !needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work, aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker so as not
	 * to impede the current task (userspace, for blocking modesets),
	 * which is executed inline.
For out-of-line asynchronous modesets/flips, 15651 * deferring to a new worker seems overkill, but we would place a 15652 * schedule point (cond_resched()) here anyway to keep latencies 15653 * down. 15654 */ 15655 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 15656 queue_work(system_highpri_wq, &state->base.commit_work); 15657 } 15658 15659 static void intel_atomic_commit_work(struct work_struct *work) 15660 { 15661 struct intel_atomic_state *state = 15662 container_of(work, struct intel_atomic_state, base.commit_work); 15663 15664 intel_atomic_commit_tail(state); 15665 } 15666 15667 static int __i915_sw_fence_call 15668 intel_atomic_commit_ready(struct i915_sw_fence *fence, 15669 enum i915_sw_fence_notify notify) 15670 { 15671 struct intel_atomic_state *state = 15672 container_of(fence, struct intel_atomic_state, commit_ready); 15673 15674 switch (notify) { 15675 case FENCE_COMPLETE: 15676 /* we do blocking waits in the worker, nothing to do here */ 15677 break; 15678 case FENCE_FREE: 15679 { 15680 struct intel_atomic_helper *helper = 15681 &to_i915(state->base.dev)->atomic_helper; 15682 15683 if (llist_add(&state->freed, &helper->free_list)) 15684 schedule_work(&helper->free_work); 15685 break; 15686 } 15687 } 15688 15689 return NOTIFY_DONE; 15690 } 15691 15692 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 15693 { 15694 struct intel_plane_state *old_plane_state, *new_plane_state; 15695 struct intel_plane *plane; 15696 int i; 15697 15698 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 15699 new_plane_state, i) 15700 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 15701 to_intel_frontbuffer(new_plane_state->hw.fb), 15702 plane->frontbuffer_bit); 15703 } 15704 15705 static void assert_global_state_locked(struct drm_i915_private *dev_priv) 15706 { 15707 struct intel_crtc *crtc; 15708 15709 for_each_intel_crtc(&dev_priv->drm, crtc) 15710 drm_modeset_lock_assert_held(&crtc->base.mutex); 15711 } 15712 15713 static int intel_atomic_commit(struct drm_device *dev, 15714 struct drm_atomic_state *_state, 15715 bool nonblock) 15716 { 15717 struct intel_atomic_state *state = to_intel_atomic_state(_state); 15718 struct drm_i915_private *dev_priv = to_i915(dev); 15719 int ret = 0; 15720 15721 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 15722 15723 drm_atomic_state_get(&state->base); 15724 i915_sw_fence_init(&state->commit_ready, 15725 intel_atomic_commit_ready); 15726 15727 /* 15728 * The intel_legacy_cursor_update() fast path takes care 15729 * of avoiding the vblank waits for simple cursor 15730 * movement and flips. For cursor on/off and size changes, 15731 * we want to perform the vblank waits so that watermark 15732 * updates happen during the correct frames. Gen9+ have 15733 * double buffered watermarks and so shouldn't need this. 15734 * 15735 * Unset state->legacy_cursor_update before the call to 15736 * drm_atomic_helper_setup_commit() because otherwise 15737 * drm_atomic_helper_wait_for_flip_done() is a noop and 15738 * we get FIFO underruns because we didn't wait 15739 * for vblank. 15740 * 15741 * FIXME doing watermarks and fb cleanup from a vblank worker 15742 * (assuming we had any) would solve these problems. 
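	 *
	 * (Hence the loop below: if any crtc still needs a post-vblank
	 * watermark update, the legacy cursor fast path is demoted to a
	 * normal commit by clearing legacy_cursor_update.)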
15743 */ 15744 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) { 15745 struct intel_crtc_state *new_crtc_state; 15746 struct intel_crtc *crtc; 15747 int i; 15748 15749 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 15750 if (new_crtc_state->wm.need_postvbl_update || 15751 new_crtc_state->update_wm_post) 15752 state->base.legacy_cursor_update = false; 15753 } 15754 15755 ret = intel_atomic_prepare_commit(state); 15756 if (ret) { 15757 drm_dbg_atomic(&dev_priv->drm, 15758 "Preparing state failed with %i\n", ret); 15759 i915_sw_fence_commit(&state->commit_ready); 15760 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 15761 return ret; 15762 } 15763 15764 ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 15765 if (!ret) 15766 ret = drm_atomic_helper_swap_state(&state->base, true); 15767 if (!ret) 15768 intel_atomic_swap_global_state(state); 15769 15770 if (ret) { 15771 struct intel_crtc_state *new_crtc_state; 15772 struct intel_crtc *crtc; 15773 int i; 15774 15775 i915_sw_fence_commit(&state->commit_ready); 15776 15777 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 15778 intel_dsb_cleanup(new_crtc_state); 15779 15780 drm_atomic_helper_cleanup_planes(dev, &state->base); 15781 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 15782 return ret; 15783 } 15784 dev_priv->wm.distrust_bios_wm = false; 15785 intel_shared_dpll_swap_state(state); 15786 intel_atomic_track_fbs(state); 15787 15788 if (state->global_state_changed) { 15789 assert_global_state_locked(dev_priv); 15790 15791 dev_priv->active_pipes = state->active_pipes; 15792 } 15793 15794 drm_atomic_state_get(&state->base); 15795 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 15796 15797 i915_sw_fence_commit(&state->commit_ready); 15798 if (nonblock && state->modeset) { 15799 queue_work(dev_priv->modeset_wq, &state->base.commit_work); 15800 } else if (nonblock) { 15801 queue_work(dev_priv->flip_wq, &state->base.commit_work); 15802 } else { 15803 if (state->modeset) 15804 flush_workqueue(dev_priv->modeset_wq); 15805 intel_atomic_commit_tail(state); 15806 } 15807 15808 return 0; 15809 } 15810 15811 struct wait_rps_boost { 15812 struct wait_queue_entry wait; 15813 15814 struct drm_crtc *crtc; 15815 struct i915_request *request; 15816 }; 15817 15818 static int do_rps_boost(struct wait_queue_entry *_wait, 15819 unsigned mode, int sync, void *key) 15820 { 15821 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 15822 struct i915_request *rq = wait->request; 15823 15824 /* 15825 * If we missed the vblank, but the request is already running it 15826 * is reasonable to assume that it will complete before the next 15827 * vblank without our intervention, so leave RPS alone. 
15828 */ 15829 if (!i915_request_started(rq)) 15830 intel_rps_boost(rq); 15831 i915_request_put(rq); 15832 15833 drm_crtc_vblank_put(wait->crtc); 15834 15835 list_del(&wait->wait.entry); 15836 kfree(wait); 15837 return 1; 15838 } 15839 15840 static void add_rps_boost_after_vblank(struct drm_crtc *crtc, 15841 struct dma_fence *fence) 15842 { 15843 struct wait_rps_boost *wait; 15844 15845 if (!dma_fence_is_i915(fence)) 15846 return; 15847 15848 if (INTEL_GEN(to_i915(crtc->dev)) < 6) 15849 return; 15850 15851 if (drm_crtc_vblank_get(crtc)) 15852 return; 15853 15854 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 15855 if (!wait) { 15856 drm_crtc_vblank_put(crtc); 15857 return; 15858 } 15859 15860 wait->request = to_request(dma_fence_get(fence)); 15861 wait->crtc = crtc; 15862 15863 wait->wait.func = do_rps_boost; 15864 wait->wait.flags = 0; 15865 15866 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 15867 } 15868 15869 static int intel_plane_pin_fb(struct intel_plane_state *plane_state) 15870 { 15871 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 15872 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 15873 struct drm_framebuffer *fb = plane_state->hw.fb; 15874 struct i915_vma *vma; 15875 15876 if (plane->id == PLANE_CURSOR && 15877 INTEL_INFO(dev_priv)->display.cursor_needs_physical) { 15878 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 15879 const int align = intel_cursor_alignment(dev_priv); 15880 int err; 15881 15882 err = i915_gem_object_attach_phys(obj, align); 15883 if (err) 15884 return err; 15885 } 15886 15887 vma = intel_pin_and_fence_fb_obj(fb, 15888 &plane_state->view, 15889 intel_plane_uses_fence(plane_state), 15890 &plane_state->flags); 15891 if (IS_ERR(vma)) 15892 return PTR_ERR(vma); 15893 15894 plane_state->vma = vma; 15895 15896 return 0; 15897 } 15898 15899 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 15900 { 15901 struct i915_vma *vma; 15902 15903 vma = fetch_and_zero(&old_plane_state->vma); 15904 if (vma) 15905 intel_unpin_fb_vma(vma, old_plane_state->flags); 15906 } 15907 15908 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) 15909 { 15910 struct i915_sched_attr attr = { 15911 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY), 15912 }; 15913 15914 i915_gem_object_wait_priority(obj, 0, &attr); 15915 } 15916 15917 /** 15918 * intel_prepare_plane_fb - Prepare fb for usage on plane 15919 * @_plane: drm plane to prepare for 15920 * @_new_plane_state: the plane state being prepared 15921 * 15922 * Prepares a framebuffer for usage on a display plane. Generally this 15923 * involves pinning the underlying object and updating the frontbuffer tracking 15924 * bits. Some older platforms need special physical address handling for 15925 * cursor planes. 15926 * 15927 * Returns 0 on success, negative error code on failure. 
15928 */ 15929 int 15930 intel_prepare_plane_fb(struct drm_plane *_plane, 15931 struct drm_plane_state *_new_plane_state) 15932 { 15933 struct intel_plane *plane = to_intel_plane(_plane); 15934 struct intel_plane_state *new_plane_state = 15935 to_intel_plane_state(_new_plane_state); 15936 struct intel_atomic_state *state = 15937 to_intel_atomic_state(new_plane_state->uapi.state); 15938 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 15939 const struct intel_plane_state *old_plane_state = 15940 intel_atomic_get_old_plane_state(state, plane); 15941 struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb); 15942 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb); 15943 int ret; 15944 15945 if (old_obj) { 15946 const struct intel_crtc_state *crtc_state = 15947 intel_atomic_get_new_crtc_state(state, 15948 to_intel_crtc(old_plane_state->hw.crtc)); 15949 15950 /* Big Hammer, we also need to ensure that any pending 15951 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 15952 * current scanout is retired before unpinning the old 15953 * framebuffer. Note that we rely on userspace rendering 15954 * into the buffer attached to the pipe they are waiting 15955 * on. If not, userspace generates a GPU hang with IPEHR 15956 * point to the MI_WAIT_FOR_EVENT. 15957 * 15958 * This should only fail upon a hung GPU, in which case we 15959 * can safely continue. 15960 */ 15961 if (needs_modeset(crtc_state)) { 15962 ret = i915_sw_fence_await_reservation(&state->commit_ready, 15963 old_obj->base.resv, NULL, 15964 false, 0, 15965 GFP_KERNEL); 15966 if (ret < 0) 15967 return ret; 15968 } 15969 } 15970 15971 if (new_plane_state->uapi.fence) { /* explicit fencing */ 15972 ret = i915_sw_fence_await_dma_fence(&state->commit_ready, 15973 new_plane_state->uapi.fence, 15974 i915_fence_timeout(dev_priv), 15975 GFP_KERNEL); 15976 if (ret < 0) 15977 return ret; 15978 } 15979 15980 if (!obj) 15981 return 0; 15982 15983 ret = i915_gem_object_pin_pages(obj); 15984 if (ret) 15985 return ret; 15986 15987 ret = intel_plane_pin_fb(new_plane_state); 15988 15989 i915_gem_object_unpin_pages(obj); 15990 if (ret) 15991 return ret; 15992 15993 fb_obj_bump_render_priority(obj); 15994 i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB); 15995 15996 if (!new_plane_state->uapi.fence) { /* implicit fencing */ 15997 struct dma_fence *fence; 15998 15999 ret = i915_sw_fence_await_reservation(&state->commit_ready, 16000 obj->base.resv, NULL, 16001 false, 16002 i915_fence_timeout(dev_priv), 16003 GFP_KERNEL); 16004 if (ret < 0) 16005 goto unpin_fb; 16006 16007 fence = dma_resv_get_excl_rcu(obj->base.resv); 16008 if (fence) { 16009 add_rps_boost_after_vblank(new_plane_state->hw.crtc, 16010 fence); 16011 dma_fence_put(fence); 16012 } 16013 } else { 16014 add_rps_boost_after_vblank(new_plane_state->hw.crtc, 16015 new_plane_state->uapi.fence); 16016 } 16017 16018 /* 16019 * We declare pageflips to be interactive and so merit a small bias 16020 * towards upclocking to deliver the frame on time. By only changing 16021 * the RPS thresholds to sample more regularly and aim for higher 16022 * clocks we can hopefully deliver low power workloads (like kodi) 16023 * that are not quite steady state without resorting to forcing 16024 * maximum clocks following a vblank miss (see do_rps_boost()). 
16025 */ 16026 if (!state->rps_interactive) { 16027 intel_rps_mark_interactive(&dev_priv->gt.rps, true); 16028 state->rps_interactive = true; 16029 } 16030 16031 return 0; 16032 16033 unpin_fb: 16034 intel_plane_unpin_fb(new_plane_state); 16035 16036 return ret; 16037 } 16038 16039 /** 16040 * intel_cleanup_plane_fb - Cleans up an fb after plane use 16041 * @plane: drm plane to clean up for 16042 * @_old_plane_state: the state from the previous modeset 16043 * 16044 * Cleans up a framebuffer that has just been removed from a plane. 16045 */ 16046 void 16047 intel_cleanup_plane_fb(struct drm_plane *plane, 16048 struct drm_plane_state *_old_plane_state) 16049 { 16050 struct intel_plane_state *old_plane_state = 16051 to_intel_plane_state(_old_plane_state); 16052 struct intel_atomic_state *state = 16053 to_intel_atomic_state(old_plane_state->uapi.state); 16054 struct drm_i915_private *dev_priv = to_i915(plane->dev); 16055 struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb); 16056 16057 if (!obj) 16058 return; 16059 16060 if (state->rps_interactive) { 16061 intel_rps_mark_interactive(&dev_priv->gt.rps, false); 16062 state->rps_interactive = false; 16063 } 16064 16065 /* Should only be called after a successful intel_prepare_plane_fb()! */ 16066 intel_plane_unpin_fb(old_plane_state); 16067 } 16068 16069 /** 16070 * intel_plane_destroy - destroy a plane 16071 * @plane: plane to destroy 16072 * 16073 * Common destruction function for all types of planes (primary, cursor, 16074 * sprite). 16075 */ 16076 void intel_plane_destroy(struct drm_plane *plane) 16077 { 16078 drm_plane_cleanup(plane); 16079 kfree(to_intel_plane(plane)); 16080 } 16081 16082 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, 16083 u32 format, u64 modifier) 16084 { 16085 switch (modifier) { 16086 case DRM_FORMAT_MOD_LINEAR: 16087 case I915_FORMAT_MOD_X_TILED: 16088 break; 16089 default: 16090 return false; 16091 } 16092 16093 switch (format) { 16094 case DRM_FORMAT_C8: 16095 case DRM_FORMAT_RGB565: 16096 case DRM_FORMAT_XRGB1555: 16097 case DRM_FORMAT_XRGB8888: 16098 return modifier == DRM_FORMAT_MOD_LINEAR || 16099 modifier == I915_FORMAT_MOD_X_TILED; 16100 default: 16101 return false; 16102 } 16103 } 16104 16105 static bool i965_plane_format_mod_supported(struct drm_plane *_plane, 16106 u32 format, u64 modifier) 16107 { 16108 switch (modifier) { 16109 case DRM_FORMAT_MOD_LINEAR: 16110 case I915_FORMAT_MOD_X_TILED: 16111 break; 16112 default: 16113 return false; 16114 } 16115 16116 switch (format) { 16117 case DRM_FORMAT_C8: 16118 case DRM_FORMAT_RGB565: 16119 case DRM_FORMAT_XRGB8888: 16120 case DRM_FORMAT_XBGR8888: 16121 case DRM_FORMAT_ARGB8888: 16122 case DRM_FORMAT_ABGR8888: 16123 case DRM_FORMAT_XRGB2101010: 16124 case DRM_FORMAT_XBGR2101010: 16125 case DRM_FORMAT_ARGB2101010: 16126 case DRM_FORMAT_ABGR2101010: 16127 case DRM_FORMAT_XBGR16161616F: 16128 return modifier == DRM_FORMAT_MOD_LINEAR || 16129 modifier == I915_FORMAT_MOD_X_TILED; 16130 default: 16131 return false; 16132 } 16133 } 16134 16135 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, 16136 u32 format, u64 modifier) 16137 { 16138 return modifier == DRM_FORMAT_MOD_LINEAR && 16139 format == DRM_FORMAT_ARGB8888; 16140 } 16141 16142 static const struct drm_plane_funcs i965_plane_funcs = { 16143 .update_plane = drm_atomic_helper_update_plane, 16144 .disable_plane = drm_atomic_helper_disable_plane, 16145 .destroy = intel_plane_destroy, 16146 .atomic_duplicate_state = intel_plane_duplicate_state, 16147 
.atomic_destroy_state = intel_plane_destroy_state, 16148 .format_mod_supported = i965_plane_format_mod_supported, 16149 }; 16150 16151 static const struct drm_plane_funcs i8xx_plane_funcs = { 16152 .update_plane = drm_atomic_helper_update_plane, 16153 .disable_plane = drm_atomic_helper_disable_plane, 16154 .destroy = intel_plane_destroy, 16155 .atomic_duplicate_state = intel_plane_duplicate_state, 16156 .atomic_destroy_state = intel_plane_destroy_state, 16157 .format_mod_supported = i8xx_plane_format_mod_supported, 16158 }; 16159 16160 static int 16161 intel_legacy_cursor_update(struct drm_plane *_plane, 16162 struct drm_crtc *_crtc, 16163 struct drm_framebuffer *fb, 16164 int crtc_x, int crtc_y, 16165 unsigned int crtc_w, unsigned int crtc_h, 16166 u32 src_x, u32 src_y, 16167 u32 src_w, u32 src_h, 16168 struct drm_modeset_acquire_ctx *ctx) 16169 { 16170 struct intel_plane *plane = to_intel_plane(_plane); 16171 struct intel_crtc *crtc = to_intel_crtc(_crtc); 16172 struct intel_plane_state *old_plane_state = 16173 to_intel_plane_state(plane->base.state); 16174 struct intel_plane_state *new_plane_state; 16175 struct intel_crtc_state *crtc_state = 16176 to_intel_crtc_state(crtc->base.state); 16177 struct intel_crtc_state *new_crtc_state; 16178 int ret; 16179 16180 /* 16181 * When crtc is inactive or there is a modeset pending, 16182 * wait for it to complete in the slowpath 16183 */ 16184 if (!crtc_state->hw.active || needs_modeset(crtc_state) || 16185 crtc_state->update_pipe) 16186 goto slow; 16187 16188 /* 16189 * Don't do an async update if there is an outstanding commit modifying 16190 * the plane. This prevents our async update's changes from getting 16191 * overridden by a previous synchronous update's state. 16192 */ 16193 if (old_plane_state->uapi.commit && 16194 !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done)) 16195 goto slow; 16196 16197 /* 16198 * If any parameters change that may affect watermarks, 16199 * take the slowpath. Only changing fb or position should be 16200 * in the fastpath. 
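	 * (E.g. showing, hiding, or resizing the cursor changes its
	 * watermark contribution, which is why the size and fb-presence
	 * checks below force the slowpath.)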
16201 */ 16202 if (old_plane_state->uapi.crtc != &crtc->base || 16203 old_plane_state->uapi.src_w != src_w || 16204 old_plane_state->uapi.src_h != src_h || 16205 old_plane_state->uapi.crtc_w != crtc_w || 16206 old_plane_state->uapi.crtc_h != crtc_h || 16207 !old_plane_state->uapi.fb != !fb) 16208 goto slow; 16209 16210 new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base)); 16211 if (!new_plane_state) 16212 return -ENOMEM; 16213 16214 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base)); 16215 if (!new_crtc_state) { 16216 ret = -ENOMEM; 16217 goto out_free; 16218 } 16219 16220 drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb); 16221 16222 new_plane_state->uapi.src_x = src_x; 16223 new_plane_state->uapi.src_y = src_y; 16224 new_plane_state->uapi.src_w = src_w; 16225 new_plane_state->uapi.src_h = src_h; 16226 new_plane_state->uapi.crtc_x = crtc_x; 16227 new_plane_state->uapi.crtc_y = crtc_y; 16228 new_plane_state->uapi.crtc_w = crtc_w; 16229 new_plane_state->uapi.crtc_h = crtc_h; 16230 16231 intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state); 16232 16233 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, 16234 old_plane_state, new_plane_state); 16235 if (ret) 16236 goto out_free; 16237 16238 ret = intel_plane_pin_fb(new_plane_state); 16239 if (ret) 16240 goto out_free; 16241 16242 intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb), 16243 ORIGIN_FLIP); 16244 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 16245 to_intel_frontbuffer(new_plane_state->hw.fb), 16246 plane->frontbuffer_bit); 16247 16248 /* Swap plane state */ 16249 plane->base.state = &new_plane_state->uapi; 16250 16251 /* 16252 * We cannot swap crtc_state as it may be in use by an atomic commit or 16253 * page flip that's running simultaneously. If we swap crtc_state and 16254 * destroy the old state, we will cause a use-after-free there. 16255 * 16256 * Only update active_planes, which is needed for our internal 16257 * bookkeeping. Either value will do the right thing when updating 16258 * planes atomically. If the cursor was part of the atomic update then 16259 * we would have taken the slowpath. 
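	 *
	 * Put differently, the only thing copied across below is a plain
	 * bitmask that is never freed; the crtc_state pointer itself must
	 * never be swapped here.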
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}

static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};

static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
			       enum i9xx_plane_id i9xx_plane)
{
	if (!HAS_FBC(dev_priv))
		return false;

	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return i9xx_plane == PLANE_A; /* tied to pipe A */
	else if (IS_IVYBRIDGE(dev_priv))
		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
			i9xx_plane == PLANE_C;
	else if (INTEL_GEN(dev_priv) >= 4)
		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
	else
		return i9xx_plane == PLANE_A;
}

static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
	    INTEL_NUM_PIPES(dev_priv) == 2)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 * output on each color channel has one quarter amplitude.
16357 * It can be brought up to full amplitude by using pipe 16358 * gamma correction or pipe color space conversion to 16359 * multiply the plane output by four." 16360 * 16361 * There is no dedicated plane gamma for the primary plane, 16362 * and using the pipe gamma/csc could conflict with other 16363 * planes, so we choose not to expose fp16 on IVB primary 16364 * planes. HSW primary planes no longer have this problem. 16365 */ 16366 if (IS_IVYBRIDGE(dev_priv)) { 16367 formats = ivb_primary_formats; 16368 num_formats = ARRAY_SIZE(ivb_primary_formats); 16369 } else { 16370 formats = i965_primary_formats; 16371 num_formats = ARRAY_SIZE(i965_primary_formats); 16372 } 16373 } else { 16374 formats = i8xx_primary_formats; 16375 num_formats = ARRAY_SIZE(i8xx_primary_formats); 16376 } 16377 16378 if (INTEL_GEN(dev_priv) >= 4) 16379 plane_funcs = &i965_plane_funcs; 16380 else 16381 plane_funcs = &i8xx_plane_funcs; 16382 16383 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 16384 plane->min_cdclk = vlv_plane_min_cdclk; 16385 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 16386 plane->min_cdclk = hsw_plane_min_cdclk; 16387 else if (IS_IVYBRIDGE(dev_priv)) 16388 plane->min_cdclk = ivb_plane_min_cdclk; 16389 else 16390 plane->min_cdclk = i9xx_plane_min_cdclk; 16391 16392 plane->max_stride = i9xx_plane_max_stride; 16393 plane->update_plane = i9xx_update_plane; 16394 plane->disable_plane = i9xx_disable_plane; 16395 plane->get_hw_state = i9xx_plane_get_hw_state; 16396 plane->check_plane = i9xx_plane_check; 16397 16398 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 16399 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 16400 0, plane_funcs, 16401 formats, num_formats, 16402 i9xx_format_modifiers, 16403 DRM_PLANE_TYPE_PRIMARY, 16404 "primary %c", pipe_name(pipe)); 16405 else 16406 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 16407 0, plane_funcs, 16408 formats, num_formats, 16409 i9xx_format_modifiers, 16410 DRM_PLANE_TYPE_PRIMARY, 16411 "plane %c", 16412 plane_name(plane->i9xx_plane)); 16413 if (ret) 16414 goto fail; 16415 16416 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 16417 supported_rotations = 16418 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 16419 DRM_MODE_REFLECT_X; 16420 } else if (INTEL_GEN(dev_priv) >= 4) { 16421 supported_rotations = 16422 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 16423 } else { 16424 supported_rotations = DRM_MODE_ROTATE_0; 16425 } 16426 16427 if (INTEL_GEN(dev_priv) >= 4) 16428 drm_plane_create_rotation_property(&plane->base, 16429 DRM_MODE_ROTATE_0, 16430 supported_rotations); 16431 16432 zpos = 0; 16433 drm_plane_create_zpos_immutable_property(&plane->base, zpos); 16434 16435 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); 16436 16437 return plane; 16438 16439 fail: 16440 intel_plane_free(plane); 16441 16442 return ERR_PTR(ret); 16443 } 16444 16445 static struct intel_plane * 16446 intel_cursor_plane_create(struct drm_i915_private *dev_priv, 16447 enum pipe pipe) 16448 { 16449 struct intel_plane *cursor; 16450 int ret, zpos; 16451 16452 cursor = intel_plane_alloc(); 16453 if (IS_ERR(cursor)) 16454 return cursor; 16455 16456 cursor->pipe = pipe; 16457 cursor->i9xx_plane = (enum i9xx_plane_id) pipe; 16458 cursor->id = PLANE_CURSOR; 16459 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); 16460 16461 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 16462 cursor->max_stride = i845_cursor_max_stride; 16463 cursor->update_plane = i845_update_cursor; 16464 cursor->disable_plane = i845_disable_cursor; 
16465 cursor->get_hw_state = i845_cursor_get_hw_state; 16466 cursor->check_plane = i845_check_cursor; 16467 } else { 16468 cursor->max_stride = i9xx_cursor_max_stride; 16469 cursor->update_plane = i9xx_update_cursor; 16470 cursor->disable_plane = i9xx_disable_cursor; 16471 cursor->get_hw_state = i9xx_cursor_get_hw_state; 16472 cursor->check_plane = i9xx_check_cursor; 16473 } 16474 16475 cursor->cursor.base = ~0; 16476 cursor->cursor.cntl = ~0; 16477 16478 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 16479 cursor->cursor.size = ~0; 16480 16481 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 16482 0, &intel_cursor_plane_funcs, 16483 intel_cursor_formats, 16484 ARRAY_SIZE(intel_cursor_formats), 16485 cursor_format_modifiers, 16486 DRM_PLANE_TYPE_CURSOR, 16487 "cursor %c", pipe_name(pipe)); 16488 if (ret) 16489 goto fail; 16490 16491 if (INTEL_GEN(dev_priv) >= 4) 16492 drm_plane_create_rotation_property(&cursor->base, 16493 DRM_MODE_ROTATE_0, 16494 DRM_MODE_ROTATE_0 | 16495 DRM_MODE_ROTATE_180); 16496 16497 zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; 16498 drm_plane_create_zpos_immutable_property(&cursor->base, zpos); 16499 16500 if (INTEL_GEN(dev_priv) >= 12) 16501 drm_plane_enable_fb_damage_clips(&cursor->base); 16502 16503 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 16504 16505 return cursor; 16506 16507 fail: 16508 intel_plane_free(cursor); 16509 16510 return ERR_PTR(ret); 16511 } 16512 16513 #define INTEL_CRTC_FUNCS \ 16514 .gamma_set = drm_atomic_helper_legacy_gamma_set, \ 16515 .set_config = drm_atomic_helper_set_config, \ 16516 .destroy = intel_crtc_destroy, \ 16517 .page_flip = drm_atomic_helper_page_flip, \ 16518 .atomic_duplicate_state = intel_crtc_duplicate_state, \ 16519 .atomic_destroy_state = intel_crtc_destroy_state, \ 16520 .set_crc_source = intel_crtc_set_crc_source, \ 16521 .verify_crc_source = intel_crtc_verify_crc_source, \ 16522 .get_crc_sources = intel_crtc_get_crc_sources 16523 16524 static const struct drm_crtc_funcs bdw_crtc_funcs = { 16525 INTEL_CRTC_FUNCS, 16526 16527 .get_vblank_counter = g4x_get_vblank_counter, 16528 .enable_vblank = bdw_enable_vblank, 16529 .disable_vblank = bdw_disable_vblank, 16530 .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, 16531 }; 16532 16533 static const struct drm_crtc_funcs ilk_crtc_funcs = { 16534 INTEL_CRTC_FUNCS, 16535 16536 .get_vblank_counter = g4x_get_vblank_counter, 16537 .enable_vblank = ilk_enable_vblank, 16538 .disable_vblank = ilk_disable_vblank, 16539 .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, 16540 }; 16541 16542 static const struct drm_crtc_funcs g4x_crtc_funcs = { 16543 INTEL_CRTC_FUNCS, 16544 16545 .get_vblank_counter = g4x_get_vblank_counter, 16546 .enable_vblank = i965_enable_vblank, 16547 .disable_vblank = i965_disable_vblank, 16548 .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, 16549 }; 16550 16551 static const struct drm_crtc_funcs i965_crtc_funcs = { 16552 INTEL_CRTC_FUNCS, 16553 16554 .get_vblank_counter = i915_get_vblank_counter, 16555 .enable_vblank = i965_enable_vblank, 16556 .disable_vblank = i965_disable_vblank, 16557 .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, 16558 }; 16559 16560 static const struct drm_crtc_funcs i915gm_crtc_funcs = { 16561 INTEL_CRTC_FUNCS, 16562 16563 .get_vblank_counter = i915_get_vblank_counter, 16564 .enable_vblank = i915gm_enable_vblank, 16565 .disable_vblank = i915gm_disable_vblank, 16566 .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, 16567 }; 16568 16569 
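/*
 * Note: which of these vtables a crtc gets is decided in intel_crtc_init()
 * below, based purely on the platform's vblank hardware: g4x and the PCH
 * platforms have the dedicated frame counter (g4x_get_vblank_counter),
 * pre-g4x gen3/4 parts use the older counter register
 * (i915_get_vblank_counter), and gen2 has no hardware counter at all,
 * hence the missing .get_vblank_counter in i8xx_crtc_funcs.
 */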
static const struct drm_crtc_funcs i915_crtc_funcs = { 16570 INTEL_CRTC_FUNCS, 16571 16572 .get_vblank_counter = i915_get_vblank_counter, 16573 .enable_vblank = i8xx_enable_vblank, 16574 .disable_vblank = i8xx_disable_vblank, 16575 .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, 16576 }; 16577 16578 static const struct drm_crtc_funcs i8xx_crtc_funcs = { 16579 INTEL_CRTC_FUNCS, 16580 16581 /* no hw vblank counter */ 16582 .enable_vblank = i8xx_enable_vblank, 16583 .disable_vblank = i8xx_disable_vblank, 16584 .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, 16585 }; 16586 16587 static struct intel_crtc *intel_crtc_alloc(void) 16588 { 16589 struct intel_crtc_state *crtc_state; 16590 struct intel_crtc *crtc; 16591 16592 crtc = kzalloc(sizeof(*crtc), GFP_KERNEL); 16593 if (!crtc) 16594 return ERR_PTR(-ENOMEM); 16595 16596 crtc_state = intel_crtc_state_alloc(crtc); 16597 if (!crtc_state) { 16598 kfree(crtc); 16599 return ERR_PTR(-ENOMEM); 16600 } 16601 16602 crtc->base.state = &crtc_state->uapi; 16603 crtc->config = crtc_state; 16604 16605 return crtc; 16606 } 16607 16608 static void intel_crtc_free(struct intel_crtc *crtc) 16609 { 16610 intel_crtc_destroy_state(&crtc->base, crtc->base.state); 16611 kfree(crtc); 16612 } 16613 16614 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) 16615 { 16616 struct intel_plane *plane; 16617 16618 for_each_intel_plane(&dev_priv->drm, plane) { 16619 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, 16620 plane->pipe); 16621 16622 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); 16623 } 16624 } 16625 16626 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) 16627 { 16628 struct intel_plane *primary, *cursor; 16629 const struct drm_crtc_funcs *funcs; 16630 struct intel_crtc *crtc; 16631 int sprite, ret; 16632 16633 crtc = intel_crtc_alloc(); 16634 if (IS_ERR(crtc)) 16635 return PTR_ERR(crtc); 16636 16637 crtc->pipe = pipe; 16638 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe]; 16639 16640 primary = intel_primary_plane_create(dev_priv, pipe); 16641 if (IS_ERR(primary)) { 16642 ret = PTR_ERR(primary); 16643 goto fail; 16644 } 16645 crtc->plane_ids_mask |= BIT(primary->id); 16646 16647 for_each_sprite(dev_priv, pipe, sprite) { 16648 struct intel_plane *plane; 16649 16650 plane = intel_sprite_plane_create(dev_priv, pipe, sprite); 16651 if (IS_ERR(plane)) { 16652 ret = PTR_ERR(plane); 16653 goto fail; 16654 } 16655 crtc->plane_ids_mask |= BIT(plane->id); 16656 } 16657 16658 cursor = intel_cursor_plane_create(dev_priv, pipe); 16659 if (IS_ERR(cursor)) { 16660 ret = PTR_ERR(cursor); 16661 goto fail; 16662 } 16663 crtc->plane_ids_mask |= BIT(cursor->id); 16664 16665 if (HAS_GMCH(dev_priv)) { 16666 if (IS_CHERRYVIEW(dev_priv) || 16667 IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv)) 16668 funcs = &g4x_crtc_funcs; 16669 else if (IS_GEN(dev_priv, 4)) 16670 funcs = &i965_crtc_funcs; 16671 else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv)) 16672 funcs = &i915gm_crtc_funcs; 16673 else if (IS_GEN(dev_priv, 3)) 16674 funcs = &i915_crtc_funcs; 16675 else 16676 funcs = &i8xx_crtc_funcs; 16677 } else { 16678 if (INTEL_GEN(dev_priv) >= 8) 16679 funcs = &bdw_crtc_funcs; 16680 else 16681 funcs = &ilk_crtc_funcs; 16682 } 16683 16684 ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base, 16685 &primary->base, &cursor->base, 16686 funcs, "pipe %c", pipe_name(pipe)); 16687 if (ret) 16688 goto fail; 16689 16690 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) || 16691 
dev_priv->pipe_to_crtc_mapping[pipe] != NULL); 16692 dev_priv->pipe_to_crtc_mapping[pipe] = crtc; 16693 16694 if (INTEL_GEN(dev_priv) < 9) { 16695 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane; 16696 16697 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 16698 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL); 16699 dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc; 16700 } 16701 16702 intel_color_init(crtc); 16703 16704 intel_crtc_crc_init(crtc); 16705 16706 drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe); 16707 16708 return 0; 16709 16710 fail: 16711 intel_crtc_free(crtc); 16712 16713 return ret; 16714 } 16715 16716 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 16717 struct drm_file *file) 16718 { 16719 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 16720 struct drm_crtc *drmmode_crtc; 16721 struct intel_crtc *crtc; 16722 16723 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); 16724 if (!drmmode_crtc) 16725 return -ENOENT; 16726 16727 crtc = to_intel_crtc(drmmode_crtc); 16728 pipe_from_crtc_id->pipe = crtc->pipe; 16729 16730 return 0; 16731 } 16732 16733 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) 16734 { 16735 struct drm_device *dev = encoder->base.dev; 16736 struct intel_encoder *source_encoder; 16737 u32 possible_clones = 0; 16738 16739 for_each_intel_encoder(dev, source_encoder) { 16740 if (encoders_cloneable(encoder, source_encoder)) 16741 possible_clones |= drm_encoder_mask(&source_encoder->base); 16742 } 16743 16744 return possible_clones; 16745 } 16746 16747 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) 16748 { 16749 struct drm_device *dev = encoder->base.dev; 16750 struct intel_crtc *crtc; 16751 u32 possible_crtcs = 0; 16752 16753 for_each_intel_crtc(dev, crtc) { 16754 if (encoder->pipe_mask & BIT(crtc->pipe)) 16755 possible_crtcs |= drm_crtc_mask(&crtc->base); 16756 } 16757 16758 return possible_crtcs; 16759 } 16760 16761 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 16762 { 16763 if (!IS_MOBILE(dev_priv)) 16764 return false; 16765 16766 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0) 16767 return false; 16768 16769 if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE)) 16770 return false; 16771 16772 return true; 16773 } 16774 16775 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 16776 { 16777 if (INTEL_GEN(dev_priv) >= 9) 16778 return false; 16779 16780 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 16781 return false; 16782 16783 if (HAS_PCH_LPT_H(dev_priv) && 16784 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 16785 return false; 16786 16787 /* DDI E can't be used if DDI A requires 4 lanes */ 16788 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 16789 return false; 16790 16791 if (!dev_priv->vbt.int_crt_support) 16792 return false; 16793 16794 return true; 16795 } 16796 16797 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) 16798 { 16799 int pps_num; 16800 int pps_idx; 16801 16802 if (HAS_DDI(dev_priv)) 16803 return; 16804 /* 16805 * This w/a is needed at least on CPT/PPT, but to be sure apply it 16806 * everywhere where registers can be write protected. 
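	 *
	 * The loop below assumes the sequencer instances are laid out back
	 * to back, so unlocking instance N is just a write to PP_CONTROL(N):
	 * VLV/CHV expose two panel power sequencers, everything else covered
	 * here has a single one.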
16807 */ 16808 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 16809 pps_num = 2; 16810 else 16811 pps_num = 1; 16812 16813 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { 16814 u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx)); 16815 16816 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; 16817 intel_de_write(dev_priv, PP_CONTROL(pps_idx), val); 16818 } 16819 } 16820 16821 static void intel_pps_init(struct drm_i915_private *dev_priv) 16822 { 16823 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv)) 16824 dev_priv->pps_mmio_base = PCH_PPS_BASE; 16825 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 16826 dev_priv->pps_mmio_base = VLV_PPS_BASE; 16827 else 16828 dev_priv->pps_mmio_base = PPS_BASE; 16829 16830 intel_pps_unlock_regs_wa(dev_priv); 16831 } 16832 16833 static void intel_setup_outputs(struct drm_i915_private *dev_priv) 16834 { 16835 struct intel_encoder *encoder; 16836 bool dpd_is_edp = false; 16837 16838 intel_pps_init(dev_priv); 16839 16840 if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) 16841 return; 16842 16843 if (IS_ROCKETLAKE(dev_priv)) { 16844 intel_ddi_init(dev_priv, PORT_A); 16845 intel_ddi_init(dev_priv, PORT_B); 16846 intel_ddi_init(dev_priv, PORT_D); /* DDI TC1 */ 16847 intel_ddi_init(dev_priv, PORT_E); /* DDI TC2 */ 16848 } else if (INTEL_GEN(dev_priv) >= 12) { 16849 intel_ddi_init(dev_priv, PORT_A); 16850 intel_ddi_init(dev_priv, PORT_B); 16851 intel_ddi_init(dev_priv, PORT_D); 16852 intel_ddi_init(dev_priv, PORT_E); 16853 intel_ddi_init(dev_priv, PORT_F); 16854 intel_ddi_init(dev_priv, PORT_G); 16855 intel_ddi_init(dev_priv, PORT_H); 16856 intel_ddi_init(dev_priv, PORT_I); 16857 icl_dsi_init(dev_priv); 16858 } else if (IS_ELKHARTLAKE(dev_priv)) { 16859 intel_ddi_init(dev_priv, PORT_A); 16860 intel_ddi_init(dev_priv, PORT_B); 16861 intel_ddi_init(dev_priv, PORT_C); 16862 intel_ddi_init(dev_priv, PORT_D); 16863 icl_dsi_init(dev_priv); 16864 } else if (IS_GEN(dev_priv, 11)) { 16865 intel_ddi_init(dev_priv, PORT_A); 16866 intel_ddi_init(dev_priv, PORT_B); 16867 intel_ddi_init(dev_priv, PORT_C); 16868 intel_ddi_init(dev_priv, PORT_D); 16869 intel_ddi_init(dev_priv, PORT_E); 16870 /* 16871 * On some ICL SKUs port F is not present. No strap bits for 16872 * this, so rely on VBT. 16873 * Work around broken VBTs on SKUs known to have no port F. 16874 */ 16875 if (IS_ICL_WITH_PORT_F(dev_priv) && 16876 intel_bios_is_port_present(dev_priv, PORT_F)) 16877 intel_ddi_init(dev_priv, PORT_F); 16878 16879 icl_dsi_init(dev_priv); 16880 } else if (IS_GEN9_LP(dev_priv)) { 16881 /* 16882 * FIXME: Broxton doesn't support port detection via the 16883 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 16884 * detect the ports. 16885 */ 16886 intel_ddi_init(dev_priv, PORT_A); 16887 intel_ddi_init(dev_priv, PORT_B); 16888 intel_ddi_init(dev_priv, PORT_C); 16889 16890 vlv_dsi_init(dev_priv); 16891 } else if (HAS_DDI(dev_priv)) { 16892 int found; 16893 16894 if (intel_ddi_crt_present(dev_priv)) 16895 intel_crt_init(dev_priv); 16896 16897 /* 16898 * Haswell uses DDI functions to detect digital outputs. 16899 * On SKL pre-D0 the strap isn't connected, so we assume 16900 * it's there. 
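		 *
		 * Hence the IS_GEN9_BC() override on the strap check right
		 * below (WaIgnoreDDIAStrap).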
		 */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/*
		 * DDI B, C, D, and F detection is indicated by the
		 * SFUSE_STRAP register.
		 */
		found = intel_de_read(dev_priv, SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB is multiplexed with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claims are DP or eDP.
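		 *
		 * The net rule implemented below: register a DP connector if
		 * either the strap or the VBT says the port is there, treat
		 * it as eDP only if the VBT agrees, and register HDMI on the
		 * port as well whenever it did not end up as eDP.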
16982 */ 16983 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 16984 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 16985 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port) 16986 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); 16987 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 16988 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 16989 16990 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 16991 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 16992 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port) 16993 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); 16994 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 16995 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 16996 16997 if (IS_CHERRYVIEW(dev_priv)) { 16998 /* 16999 * eDP not supported on port D, 17000 * so no need to worry about it 17001 */ 17002 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 17003 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port) 17004 intel_dp_init(dev_priv, CHV_DP_D, PORT_D); 17005 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port) 17006 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 17007 } 17008 17009 vlv_dsi_init(dev_priv); 17010 } else if (IS_PINEVIEW(dev_priv)) { 17011 intel_lvds_init(dev_priv); 17012 intel_crt_init(dev_priv); 17013 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { 17014 bool found = false; 17015 17016 if (IS_MOBILE(dev_priv)) 17017 intel_lvds_init(dev_priv); 17018 17019 intel_crt_init(dev_priv); 17020 17021 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 17022 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); 17023 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 17024 if (!found && IS_G4X(dev_priv)) { 17025 drm_dbg_kms(&dev_priv->drm, 17026 "probing HDMI on SDVOB\n"); 17027 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 17028 } 17029 17030 if (!found && IS_G4X(dev_priv)) 17031 intel_dp_init(dev_priv, DP_B, PORT_B); 17032 } 17033 17034 /* Before G4X SDVOC doesn't have its own detect register */ 17035 17036 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 17037 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n"); 17038 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 17039 } 17040 17041 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) { 17042 17043 if (IS_G4X(dev_priv)) { 17044 drm_dbg_kms(&dev_priv->drm, 17045 "probing HDMI on SDVOC\n"); 17046 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 17047 } 17048 if (IS_G4X(dev_priv)) 17049 intel_dp_init(dev_priv, DP_C, PORT_C); 17050 } 17051 17052 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED)) 17053 intel_dp_init(dev_priv, DP_D, PORT_D); 17054 17055 if (SUPPORTS_TV(dev_priv)) 17056 intel_tv_init(dev_priv); 17057 } else if (IS_GEN(dev_priv, 2)) { 17058 if (IS_I85X(dev_priv)) 17059 intel_lvds_init(dev_priv); 17060 17061 intel_crt_init(dev_priv); 17062 intel_dvo_init(dev_priv); 17063 } 17064 17065 intel_psr_init(dev_priv); 17066 17067 for_each_intel_encoder(&dev_priv->drm, encoder) { 17068 encoder->base.possible_crtcs = 17069 intel_encoder_possible_crtcs(encoder); 17070 encoder->base.possible_clones = 17071 intel_encoder_possible_clones(encoder); 17072 } 17073 17074 intel_init_pch_refclk(dev_priv); 17075 17076 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 17077 } 17078 17079 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 17080 { 17081 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 
17082 17083 drm_framebuffer_cleanup(fb); 17084 intel_frontbuffer_put(intel_fb->frontbuffer); 17085 17086 kfree(intel_fb); 17087 } 17088 17089 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 17090 struct drm_file *file, 17091 unsigned int *handle) 17092 { 17093 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 17094 struct drm_i915_private *i915 = to_i915(obj->base.dev); 17095 17096 if (obj->userptr.mm) { 17097 drm_dbg(&i915->drm, 17098 "attempting to use a userptr for a framebuffer, denied\n"); 17099 return -EINVAL; 17100 } 17101 17102 return drm_gem_handle_create(file, &obj->base, handle); 17103 } 17104 17105 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, 17106 struct drm_file *file, 17107 unsigned flags, unsigned color, 17108 struct drm_clip_rect *clips, 17109 unsigned num_clips) 17110 { 17111 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 17112 17113 i915_gem_object_flush_if_display(obj); 17114 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); 17115 17116 return 0; 17117 } 17118 17119 static const struct drm_framebuffer_funcs intel_fb_funcs = { 17120 .destroy = intel_user_framebuffer_destroy, 17121 .create_handle = intel_user_framebuffer_create_handle, 17122 .dirty = intel_user_framebuffer_dirty, 17123 }; 17124 17125 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, 17126 struct drm_i915_gem_object *obj, 17127 struct drm_mode_fb_cmd2 *mode_cmd) 17128 { 17129 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 17130 struct drm_framebuffer *fb = &intel_fb->base; 17131 u32 max_stride; 17132 unsigned int tiling, stride; 17133 int ret = -EINVAL; 17134 int i; 17135 17136 intel_fb->frontbuffer = intel_frontbuffer_get(obj); 17137 if (!intel_fb->frontbuffer) 17138 return -ENOMEM; 17139 17140 i915_gem_object_lock(obj); 17141 tiling = i915_gem_object_get_tiling(obj); 17142 stride = i915_gem_object_get_stride(obj); 17143 i915_gem_object_unlock(obj); 17144 17145 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 17146 /* 17147 * If there's a fence, enforce that 17148 * the fb modifier and tiling mode match. 17149 */ 17150 if (tiling != I915_TILING_NONE && 17151 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 17152 drm_dbg_kms(&dev_priv->drm, 17153 "tiling_mode doesn't match fb modifier\n"); 17154 goto err; 17155 } 17156 } else { 17157 if (tiling == I915_TILING_X) { 17158 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 17159 } else if (tiling == I915_TILING_Y) { 17160 drm_dbg_kms(&dev_priv->drm, 17161 "No Y tiling for legacy addfb\n"); 17162 goto err; 17163 } 17164 } 17165 17166 if (!drm_any_plane_has_format(&dev_priv->drm, 17167 mode_cmd->pixel_format, 17168 mode_cmd->modifier[0])) { 17169 struct drm_format_name_buf format_name; 17170 17171 drm_dbg_kms(&dev_priv->drm, 17172 "unsupported pixel format %s / modifier 0x%llx\n", 17173 drm_get_format_name(mode_cmd->pixel_format, 17174 &format_name), 17175 mode_cmd->modifier[0]); 17176 goto err; 17177 } 17178 17179 /* 17180 * gen2/3 display engine uses the fence if present, 17181 * so the tiling mode must match the fb modifier exactly. 
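	 *
	 * For example (sketch): an object tiled I915_TILING_X via the
	 * set_tiling ioctl can only back an I915_FORMAT_MOD_X_TILED fb
	 * here, and a linear object only a DRM_FORMAT_MOD_LINEAR one.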
17182 */ 17183 if (INTEL_GEN(dev_priv) < 4 && 17184 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 17185 drm_dbg_kms(&dev_priv->drm, 17186 "tiling_mode must match fb modifier exactly on gen2/3\n"); 17187 goto err; 17188 } 17189 17190 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format, 17191 mode_cmd->modifier[0]); 17192 if (mode_cmd->pitches[0] > max_stride) { 17193 drm_dbg_kms(&dev_priv->drm, 17194 "%s pitch (%u) must be at most %d\n", 17195 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 17196 "tiled" : "linear", 17197 mode_cmd->pitches[0], max_stride); 17198 goto err; 17199 } 17200 17201 /* 17202 * If there's a fence, enforce that 17203 * the fb pitch and fence stride match. 17204 */ 17205 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { 17206 drm_dbg_kms(&dev_priv->drm, 17207 "pitch (%d) must match tiling stride (%d)\n", 17208 mode_cmd->pitches[0], stride); 17209 goto err; 17210 } 17211 17212 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 17213 if (mode_cmd->offsets[0] != 0) { 17214 drm_dbg_kms(&dev_priv->drm, 17215 "plane 0 offset (0x%08x) must be 0\n", 17216 mode_cmd->offsets[0]); 17217 goto err; 17218 } 17219 17220 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); 17221 17222 for (i = 0; i < fb->format->num_planes; i++) { 17223 u32 stride_alignment; 17224 17225 if (mode_cmd->handles[i] != mode_cmd->handles[0]) { 17226 drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n", 17227 i); 17228 goto err; 17229 } 17230 17231 stride_alignment = intel_fb_stride_alignment(fb, i); 17232 if (fb->pitches[i] & (stride_alignment - 1)) { 17233 drm_dbg_kms(&dev_priv->drm, 17234 "plane %d pitch (%d) must be at least %u byte aligned\n", 17235 i, fb->pitches[i], stride_alignment); 17236 goto err; 17237 } 17238 17239 if (is_gen12_ccs_plane(fb, i)) { 17240 int ccs_aux_stride = gen12_ccs_aux_stride(fb, i); 17241 17242 if (fb->pitches[i] != ccs_aux_stride) { 17243 drm_dbg_kms(&dev_priv->drm, 17244 "ccs aux plane %d pitch (%d) must be %d\n", 17245 i, 17246 fb->pitches[i], ccs_aux_stride); 17247 goto err; 17248 } 17249 } 17250 17251 fb->obj[i] = &obj->base; 17252 } 17253 17254 ret = intel_fill_fb_info(dev_priv, fb); 17255 if (ret) 17256 goto err; 17257 17258 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); 17259 if (ret) { 17260 drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret); 17261 goto err; 17262 } 17263 17264 return 0; 17265 17266 err: 17267 intel_frontbuffer_put(intel_fb->frontbuffer); 17268 return ret; 17269 } 17270 17271 static struct drm_framebuffer * 17272 intel_user_framebuffer_create(struct drm_device *dev, 17273 struct drm_file *filp, 17274 const struct drm_mode_fb_cmd2 *user_mode_cmd) 17275 { 17276 struct drm_framebuffer *fb; 17277 struct drm_i915_gem_object *obj; 17278 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 17279 17280 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); 17281 if (!obj) 17282 return ERR_PTR(-ENOENT); 17283 17284 fb = intel_framebuffer_create(obj, &mode_cmd); 17285 i915_gem_object_put(obj); 17286 17287 return fb; 17288 } 17289 17290 static enum drm_mode_status 17291 intel_mode_valid(struct drm_device *dev, 17292 const struct drm_display_mode *mode) 17293 { 17294 struct drm_i915_private *dev_priv = to_i915(dev); 17295 int hdisplay_max, htotal_max; 17296 int vdisplay_max, vtotal_max; 17297 17298 /* 17299 * Can't reject DBLSCAN here because Xorg ddxen can add piles 17300 * of DBLSCAN modes to the output's mode list when they detect 17301 * the scaling mode property on 
the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (INTEL_GEN(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	if (INTEL_GEN(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}

enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be sufficient on older platforms.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
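	 *
	 * E.g. with the limits below a 5120x2880 mode is still exposed on
	 * gen11+, while a 7680 pixel wide 8K mode is not, even though the
	 * transcoder itself could scan it out.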
17398 */ 17399 if (INTEL_GEN(dev_priv) >= 11) { 17400 plane_width_max = 5120; 17401 plane_height_max = 4320; 17402 } else { 17403 plane_width_max = 5120; 17404 plane_height_max = 4096; 17405 } 17406 17407 if (mode->hdisplay > plane_width_max) 17408 return MODE_H_ILLEGAL; 17409 17410 if (mode->vdisplay > plane_height_max) 17411 return MODE_V_ILLEGAL; 17412 17413 return MODE_OK; 17414 } 17415 17416 static const struct drm_mode_config_funcs intel_mode_funcs = { 17417 .fb_create = intel_user_framebuffer_create, 17418 .get_format_info = intel_get_format_info, 17419 .output_poll_changed = intel_fbdev_output_poll_changed, 17420 .mode_valid = intel_mode_valid, 17421 .atomic_check = intel_atomic_check, 17422 .atomic_commit = intel_atomic_commit, 17423 .atomic_state_alloc = intel_atomic_state_alloc, 17424 .atomic_state_clear = intel_atomic_state_clear, 17425 .atomic_state_free = intel_atomic_state_free, 17426 }; 17427 17428 /** 17429 * intel_init_display_hooks - initialize the display modesetting hooks 17430 * @dev_priv: device private 17431 */ 17432 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 17433 { 17434 intel_init_cdclk_hooks(dev_priv); 17435 17436 if (INTEL_GEN(dev_priv) >= 9) { 17437 dev_priv->display.get_pipe_config = hsw_get_pipe_config; 17438 dev_priv->display.get_initial_plane_config = 17439 skl_get_initial_plane_config; 17440 dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock; 17441 dev_priv->display.crtc_enable = hsw_crtc_enable; 17442 dev_priv->display.crtc_disable = hsw_crtc_disable; 17443 } else if (HAS_DDI(dev_priv)) { 17444 dev_priv->display.get_pipe_config = hsw_get_pipe_config; 17445 dev_priv->display.get_initial_plane_config = 17446 i9xx_get_initial_plane_config; 17447 dev_priv->display.crtc_compute_clock = 17448 hsw_crtc_compute_clock; 17449 dev_priv->display.crtc_enable = hsw_crtc_enable; 17450 dev_priv->display.crtc_disable = hsw_crtc_disable; 17451 } else if (HAS_PCH_SPLIT(dev_priv)) { 17452 dev_priv->display.get_pipe_config = ilk_get_pipe_config; 17453 dev_priv->display.get_initial_plane_config = 17454 i9xx_get_initial_plane_config; 17455 dev_priv->display.crtc_compute_clock = 17456 ilk_crtc_compute_clock; 17457 dev_priv->display.crtc_enable = ilk_crtc_enable; 17458 dev_priv->display.crtc_disable = ilk_crtc_disable; 17459 } else if (IS_CHERRYVIEW(dev_priv)) { 17460 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 17461 dev_priv->display.get_initial_plane_config = 17462 i9xx_get_initial_plane_config; 17463 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; 17464 dev_priv->display.crtc_enable = valleyview_crtc_enable; 17465 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17466 } else if (IS_VALLEYVIEW(dev_priv)) { 17467 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 17468 dev_priv->display.get_initial_plane_config = 17469 i9xx_get_initial_plane_config; 17470 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; 17471 dev_priv->display.crtc_enable = valleyview_crtc_enable; 17472 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17473 } else if (IS_G4X(dev_priv)) { 17474 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 17475 dev_priv->display.get_initial_plane_config = 17476 i9xx_get_initial_plane_config; 17477 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; 17478 dev_priv->display.crtc_enable = i9xx_crtc_enable; 17479 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17480 } else if (IS_PINEVIEW(dev_priv)) { 17481 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 
17482 dev_priv->display.get_initial_plane_config = 17483 i9xx_get_initial_plane_config; 17484 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 17485 dev_priv->display.crtc_enable = i9xx_crtc_enable; 17486 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17487 } else if (!IS_GEN(dev_priv, 2)) { 17488 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 17489 dev_priv->display.get_initial_plane_config = 17490 i9xx_get_initial_plane_config; 17491 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 17492 dev_priv->display.crtc_enable = i9xx_crtc_enable; 17493 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17494 } else { 17495 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 17496 dev_priv->display.get_initial_plane_config = 17497 i9xx_get_initial_plane_config; 17498 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; 17499 dev_priv->display.crtc_enable = i9xx_crtc_enable; 17500 dev_priv->display.crtc_disable = i9xx_crtc_disable; 17501 } 17502 17503 if (IS_GEN(dev_priv, 5)) { 17504 dev_priv->display.fdi_link_train = ilk_fdi_link_train; 17505 } else if (IS_GEN(dev_priv, 6)) { 17506 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 17507 } else if (IS_IVYBRIDGE(dev_priv)) { 17508 /* FIXME: detect B0+ stepping and use auto training */ 17509 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 17510 } 17511 17512 if (INTEL_GEN(dev_priv) >= 9) 17513 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables; 17514 else 17515 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables; 17516 17517 } 17518 17519 void intel_modeset_init_hw(struct drm_i915_private *i915) 17520 { 17521 struct intel_cdclk_state *cdclk_state = 17522 to_intel_cdclk_state(i915->cdclk.obj.state); 17523 struct intel_dbuf_state *dbuf_state = 17524 to_intel_dbuf_state(i915->dbuf.obj.state); 17525 17526 intel_update_cdclk(i915); 17527 intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK"); 17528 cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw; 17529 17530 dbuf_state->enabled_slices = i915->dbuf.enabled_slices; 17531 } 17532 17533 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state) 17534 { 17535 struct drm_plane *plane; 17536 struct intel_crtc *crtc; 17537 17538 for_each_intel_crtc(state->dev, crtc) { 17539 struct intel_crtc_state *crtc_state; 17540 17541 crtc_state = intel_atomic_get_crtc_state(state, crtc); 17542 if (IS_ERR(crtc_state)) 17543 return PTR_ERR(crtc_state); 17544 17545 if (crtc_state->hw.active) { 17546 /* 17547 * Preserve the inherited flag to avoid 17548 * taking the full modeset path. 17549 */ 17550 crtc_state->inherited = true; 17551 } 17552 } 17553 17554 drm_for_each_plane(plane, state->dev) { 17555 struct drm_plane_state *plane_state; 17556 17557 plane_state = drm_atomic_get_plane_state(state, plane); 17558 if (IS_ERR(plane_state)) 17559 return PTR_ERR(plane_state); 17560 } 17561 17562 return 0; 17563 } 17564 17565 /* 17566 * Calculate what we think the watermarks should be for the state we've read 17567 * out of the hardware and then immediately program those watermarks so that 17568 * we ensure the hardware settings match our internal state. 17569 * 17570 * We can calculate what we think WM's should be by creating a duplicate of the 17571 * current state (which was constructed during hardware readout) and running it 17572 * through the atomic check code to calculate new watermark values in the 17573 * state object. 
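 *
 * Roughly, the flow below (with the usual -EDEADLK backoff around it) is:
 *
 *	state = drm_atomic_state_alloc(&dev_priv->drm);
 *	sanitize_watermarks_add_affected(state);    (mark crtcs inherited)
 *	intel_atomic_check(&dev_priv->drm, state);  (compute crtc_state->wm)
 *	for_each_new_intel_crtc_in_state(...)
 *		dev_priv->display.optimize_watermarks(intel_state, crtc);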
17574 */ 17575 static void sanitize_watermarks(struct drm_i915_private *dev_priv) 17576 { 17577 struct drm_atomic_state *state; 17578 struct intel_atomic_state *intel_state; 17579 struct intel_crtc *crtc; 17580 struct intel_crtc_state *crtc_state; 17581 struct drm_modeset_acquire_ctx ctx; 17582 int ret; 17583 int i; 17584 17585 /* Only supported on platforms that use atomic watermark design */ 17586 if (!dev_priv->display.optimize_watermarks) 17587 return; 17588 17589 state = drm_atomic_state_alloc(&dev_priv->drm); 17590 if (drm_WARN_ON(&dev_priv->drm, !state)) 17591 return; 17592 17593 intel_state = to_intel_atomic_state(state); 17594 17595 drm_modeset_acquire_init(&ctx, 0); 17596 17597 retry: 17598 state->acquire_ctx = &ctx; 17599 17600 /* 17601 * Hardware readout is the only time we don't want to calculate 17602 * intermediate watermarks (since we don't trust the current 17603 * watermarks). 17604 */ 17605 if (!HAS_GMCH(dev_priv)) 17606 intel_state->skip_intermediate_wm = true; 17607 17608 ret = sanitize_watermarks_add_affected(state); 17609 if (ret) 17610 goto fail; 17611 17612 ret = intel_atomic_check(&dev_priv->drm, state); 17613 if (ret) 17614 goto fail; 17615 17616 /* Write calculated watermark values back */ 17617 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { 17618 crtc_state->wm.need_postvbl_update = true; 17619 dev_priv->display.optimize_watermarks(intel_state, crtc); 17620 17621 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; 17622 } 17623 17624 fail: 17625 if (ret == -EDEADLK) { 17626 drm_atomic_state_clear(state); 17627 drm_modeset_backoff(&ctx); 17628 goto retry; 17629 } 17630 17631 /* 17632 * If we fail here, it means that the hardware appears to be 17633 * programmed in a way that shouldn't be possible, given our 17634 * understanding of watermark requirements. This might mean a 17635 * mistake in the hardware readout code or a mistake in the 17636 * watermark calculations for a given platform. Raise a WARN 17637 * so that this is noticeable. 17638 * 17639 * If this actually happens, we'll have to just leave the 17640 * BIOS-programmed watermarks untouched and hope for the best. 
17641 */ 17642 drm_WARN(&dev_priv->drm, ret, 17643 "Could not determine valid watermarks for inherited state\n"); 17644 17645 drm_atomic_state_put(state); 17646 17647 drm_modeset_drop_locks(&ctx); 17648 drm_modeset_acquire_fini(&ctx); 17649 } 17650 17651 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) 17652 { 17653 if (IS_GEN(dev_priv, 5)) { 17654 u32 fdi_pll_clk = 17655 intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; 17656 17657 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; 17658 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) { 17659 dev_priv->fdi_pll_freq = 270000; 17660 } else { 17661 return; 17662 } 17663 17664 drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); 17665 } 17666 17667 static int intel_initial_commit(struct drm_device *dev) 17668 { 17669 struct drm_atomic_state *state = NULL; 17670 struct drm_modeset_acquire_ctx ctx; 17671 struct intel_crtc *crtc; 17672 int ret = 0; 17673 17674 state = drm_atomic_state_alloc(dev); 17675 if (!state) 17676 return -ENOMEM; 17677 17678 drm_modeset_acquire_init(&ctx, 0); 17679 17680 retry: 17681 state->acquire_ctx = &ctx; 17682 17683 for_each_intel_crtc(dev, crtc) { 17684 struct intel_crtc_state *crtc_state = 17685 intel_atomic_get_crtc_state(state, crtc); 17686 17687 if (IS_ERR(crtc_state)) { 17688 ret = PTR_ERR(crtc_state); 17689 goto out; 17690 } 17691 17692 if (crtc_state->hw.active) { 17693 /* 17694 * We've not yet detected sink capabilities 17695 * (audio,infoframes,etc.) and thus we don't want to 17696 * force a full state recomputation yet. We want that to 17697 * happen only for the first real commit from userspace. 17698 * So preserve the inherited flag for the time being. 17699 */ 17700 crtc_state->inherited = true; 17701 17702 ret = drm_atomic_add_affected_planes(state, &crtc->base); 17703 if (ret) 17704 goto out; 17705 17706 /* 17707 * FIXME hack to force a LUT update to avoid the 17708 * plane update forcing the pipe gamma on without 17709 * having a proper LUT loaded. Remove once we 17710 * have readout for pipe gamma enable. 17711 */ 17712 crtc_state->uapi.color_mgmt_changed = true; 17713 17714 /* 17715 * FIXME hack to force full modeset when DSC is being 17716 * used. 17717 * 17718 * As long as we do not have full state readout and 17719 * config comparison of crtc_state->dsc, we have no way 17720 * to ensure reliable fastset. Remove once we have 17721 * readout for DSC. 
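			 *
			 * The cost is one extra full modeset at boot on DSC
			 * panels; the benefit is that we never fastset into a
			 * DSC config we cannot verify against the hardware.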
17722 */ 17723 if (crtc_state->dsc.compression_enable) { 17724 ret = drm_atomic_add_affected_connectors(state, 17725 &crtc->base); 17726 if (ret) 17727 goto out; 17728 crtc_state->uapi.mode_changed = true; 17729 drm_dbg_kms(dev, "Force full modeset for DSC\n"); 17730 } 17731 } 17732 } 17733 17734 ret = drm_atomic_commit(state); 17735 17736 out: 17737 if (ret == -EDEADLK) { 17738 drm_atomic_state_clear(state); 17739 drm_modeset_backoff(&ctx); 17740 goto retry; 17741 } 17742 17743 drm_atomic_state_put(state); 17744 17745 drm_modeset_drop_locks(&ctx); 17746 drm_modeset_acquire_fini(&ctx); 17747 17748 return ret; 17749 } 17750 17751 static void intel_mode_config_init(struct drm_i915_private *i915) 17752 { 17753 struct drm_mode_config *mode_config = &i915->drm.mode_config; 17754 17755 drm_mode_config_init(&i915->drm); 17756 INIT_LIST_HEAD(&i915->global_obj_list); 17757 17758 mode_config->min_width = 0; 17759 mode_config->min_height = 0; 17760 17761 mode_config->preferred_depth = 24; 17762 mode_config->prefer_shadow = 1; 17763 17764 mode_config->allow_fb_modifiers = true; 17765 17766 mode_config->funcs = &intel_mode_funcs; 17767 17768 /* 17769 * Maximum framebuffer dimensions, chosen to match 17770 * the maximum render engine surface size on gen4+. 17771 */ 17772 if (INTEL_GEN(i915) >= 7) { 17773 mode_config->max_width = 16384; 17774 mode_config->max_height = 16384; 17775 } else if (INTEL_GEN(i915) >= 4) { 17776 mode_config->max_width = 8192; 17777 mode_config->max_height = 8192; 17778 } else if (IS_GEN(i915, 3)) { 17779 mode_config->max_width = 4096; 17780 mode_config->max_height = 4096; 17781 } else { 17782 mode_config->max_width = 2048; 17783 mode_config->max_height = 2048; 17784 } 17785 17786 if (IS_I845G(i915) || IS_I865G(i915)) { 17787 mode_config->cursor_width = IS_I845G(i915) ? 
64 : 512; 17788 mode_config->cursor_height = 1023; 17789 } else if (IS_I830(i915) || IS_I85X(i915) || 17790 IS_I915G(i915) || IS_I915GM(i915)) { 17791 mode_config->cursor_width = 64; 17792 mode_config->cursor_height = 64; 17793 } else { 17794 mode_config->cursor_width = 256; 17795 mode_config->cursor_height = 256; 17796 } 17797 } 17798 17799 static void intel_mode_config_cleanup(struct drm_i915_private *i915) 17800 { 17801 intel_atomic_global_obj_cleanup(i915); 17802 drm_mode_config_cleanup(&i915->drm); 17803 } 17804 17805 static void plane_config_fini(struct intel_initial_plane_config *plane_config) 17806 { 17807 if (plane_config->fb) { 17808 struct drm_framebuffer *fb = &plane_config->fb->base; 17809 17810 /* We may only have the stub and not a full framebuffer */ 17811 if (drm_framebuffer_read_refcount(fb)) 17812 drm_framebuffer_put(fb); 17813 else 17814 kfree(fb); 17815 } 17816 17817 if (plane_config->vma) 17818 i915_vma_put(plane_config->vma); 17819 } 17820 17821 /* part #1: call before irq install */ 17822 int intel_modeset_init_noirq(struct drm_i915_private *i915) 17823 { 17824 int ret; 17825 17826 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); 17827 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | 17828 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); 17829 17830 intel_mode_config_init(i915); 17831 17832 ret = intel_cdclk_init(i915); 17833 if (ret) 17834 return ret; 17835 17836 ret = intel_dbuf_init(i915); 17837 if (ret) 17838 return ret; 17839 17840 ret = intel_bw_init(i915); 17841 if (ret) 17842 return ret; 17843 17844 init_llist_head(&i915->atomic_helper.free_list); 17845 INIT_WORK(&i915->atomic_helper.free_work, 17846 intel_atomic_helper_free_state_worker); 17847 17848 intel_init_quirks(i915); 17849 17850 intel_fbc_init(i915); 17851 17852 return 0; 17853 } 17854 17855 /* part #2: call after irq install */ 17856 int intel_modeset_init(struct drm_i915_private *i915) 17857 { 17858 struct drm_device *dev = &i915->drm; 17859 enum pipe pipe; 17860 struct intel_crtc *crtc; 17861 int ret; 17862 17863 intel_init_pm(i915); 17864 17865 intel_panel_sanitize_ssc(i915); 17866 17867 intel_gmbus_setup(i915); 17868 17869 drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n", 17870 INTEL_NUM_PIPES(i915), 17871 INTEL_NUM_PIPES(i915) > 1 ? "s" : ""); 17872 17873 if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { 17874 for_each_pipe(i915, pipe) { 17875 ret = intel_crtc_init(i915, pipe); 17876 if (ret) { 17877 intel_mode_config_cleanup(i915); 17878 return ret; 17879 } 17880 } 17881 } 17882 17883 intel_plane_possible_crtcs_init(i915); 17884 intel_shared_dpll_init(dev); 17885 intel_update_fdi_pll_freq(i915); 17886 17887 intel_update_czclk(i915); 17888 intel_modeset_init_hw(i915); 17889 17890 intel_hdcp_component_init(i915); 17891 17892 if (i915->max_cdclk_freq == 0) 17893 intel_update_max_cdclk(i915); 17894 17895 /* Just disable it once at startup */ 17896 intel_vga_disable(i915); 17897 intel_setup_outputs(i915); 17898 17899 drm_modeset_lock_all(dev); 17900 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 17901 drm_modeset_unlock_all(dev); 17902 17903 for_each_intel_crtc(dev, crtc) { 17904 struct intel_initial_plane_config plane_config = {}; 17905 17906 if (!crtc->active) 17907 continue; 17908 17909 /* 17910 * Note that reserving the BIOS fb up front prevents us 17911 * from stuffing other stolen allocations like the ring 17912 * on top. 
This prevents some ugliness at boot time, and 17913 * can even allow for smooth boot transitions if the BIOS 17914 * fb is large enough for the active pipe configuration. 17915 */ 17916 i915->display.get_initial_plane_config(crtc, &plane_config); 17917 17918 /* 17919 * If the fb is shared between multiple heads, we'll 17920 * just get the first one. 17921 */ 17922 intel_find_initial_plane_obj(crtc, &plane_config); 17923 17924 plane_config_fini(&plane_config); 17925 } 17926 17927 /* 17928 * Make sure hardware watermarks really match the state we read out. 17929 * Note that we need to do this after reconstructing the BIOS fb's 17930 * since the watermark calculation done here will use pstate->fb. 17931 */ 17932 if (!HAS_GMCH(i915)) 17933 sanitize_watermarks(i915); 17934 17935 /* 17936 * Force all active planes to recompute their states. So that on 17937 * mode_setcrtc after probe, all the intel_plane_state variables 17938 * are already calculated and there is no assert_plane warnings 17939 * during bootup. 17940 */ 17941 ret = intel_initial_commit(dev); 17942 if (ret) 17943 drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n"); 17944 17945 return 0; 17946 } 17947 17948 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 17949 { 17950 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17951 /* 640x480@60Hz, ~25175 kHz */ 17952 struct dpll clock = { 17953 .m1 = 18, 17954 .m2 = 7, 17955 .p1 = 13, 17956 .p2 = 4, 17957 .n = 2, 17958 }; 17959 u32 dpll, fp; 17960 int i; 17961 17962 drm_WARN_ON(&dev_priv->drm, 17963 i9xx_calc_dpll_params(48000, &clock) != 25154); 17964 17965 drm_dbg_kms(&dev_priv->drm, 17966 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 17967 pipe_name(pipe), clock.vco, clock.dot); 17968 17969 fp = i9xx_dpll_compute_fp(&clock); 17970 dpll = DPLL_DVO_2X_MODE | 17971 DPLL_VGA_MODE_DIS | 17972 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 17973 PLL_P2_DIVIDE_BY_4 | 17974 PLL_REF_INPUT_DREFCLK | 17975 DPLL_VCO_ENABLE; 17976 17977 intel_de_write(dev_priv, FP0(pipe), fp); 17978 intel_de_write(dev_priv, FP1(pipe), fp); 17979 17980 intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); 17981 intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); 17982 intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); 17983 intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); 17984 intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); 17985 intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); 17986 intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); 17987 17988 /* 17989 * Apparently we need to have VGA mode enabled prior to changing 17990 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 17991 * dividers, even though the register value does change. 17992 */ 17993 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); 17994 intel_de_write(dev_priv, DPLL(pipe), dpll); 17995 17996 /* Wait for the clocks to stabilize. */ 17997 intel_de_posting_read(dev_priv, DPLL(pipe)); 17998 udelay(150); 17999 18000 /* The pixel multiplier can only be updated once the 18001 * DPLL is enabled and the clocks are stable. 18002 * 18003 * So write it again. 
18004 */ 18005 intel_de_write(dev_priv, DPLL(pipe), dpll); 18006 18007 /* We do this three times for luck */ 18008 for (i = 0; i < 3 ; i++) { 18009 intel_de_write(dev_priv, DPLL(pipe), dpll); 18010 intel_de_posting_read(dev_priv, DPLL(pipe)); 18011 udelay(150); /* wait for warmup */ 18012 } 18013 18014 intel_de_write(dev_priv, PIPECONF(pipe), 18015 PIPECONF_ENABLE | PIPECONF_PROGRESSIVE); 18016 intel_de_posting_read(dev_priv, PIPECONF(pipe)); 18017 18018 intel_wait_for_pipe_scanline_moving(crtc); 18019 } 18020 18021 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 18022 { 18023 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 18024 18025 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n", 18026 pipe_name(pipe)); 18027 18028 drm_WARN_ON(&dev_priv->drm, 18029 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & 18030 DISPLAY_PLANE_ENABLE); 18031 drm_WARN_ON(&dev_priv->drm, 18032 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & 18033 DISPLAY_PLANE_ENABLE); 18034 drm_WARN_ON(&dev_priv->drm, 18035 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & 18036 DISPLAY_PLANE_ENABLE); 18037 drm_WARN_ON(&dev_priv->drm, 18038 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE); 18039 drm_WARN_ON(&dev_priv->drm, 18040 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE); 18041 18042 intel_de_write(dev_priv, PIPECONF(pipe), 0); 18043 intel_de_posting_read(dev_priv, PIPECONF(pipe)); 18044 18045 intel_wait_for_pipe_scanline_stopped(crtc); 18046 18047 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); 18048 intel_de_posting_read(dev_priv, DPLL(pipe)); 18049 } 18050 18051 static void 18052 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) 18053 { 18054 struct intel_crtc *crtc; 18055 18056 if (INTEL_GEN(dev_priv) >= 4) 18057 return; 18058 18059 for_each_intel_crtc(&dev_priv->drm, crtc) { 18060 struct intel_plane *plane = 18061 to_intel_plane(crtc->base.primary); 18062 struct intel_crtc *plane_crtc; 18063 enum pipe pipe; 18064 18065 if (!plane->get_hw_state(plane, &pipe)) 18066 continue; 18067 18068 if (pipe == crtc->pipe) 18069 continue; 18070 18071 drm_dbg_kms(&dev_priv->drm, 18072 "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", 18073 plane->base.base.id, plane->base.name); 18074 18075 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 18076 intel_plane_disable_noatomic(plane_crtc, plane); 18077 } 18078 } 18079 18080 static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 18081 { 18082 struct drm_device *dev = crtc->base.dev; 18083 struct intel_encoder *encoder; 18084 18085 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 18086 return true; 18087 18088 return false; 18089 } 18090 18091 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) 18092 { 18093 struct drm_device *dev = encoder->base.dev; 18094 struct intel_connector *connector; 18095 18096 for_each_connector_on_encoder(dev, &encoder->base, connector) 18097 return connector; 18098 18099 return NULL; 18100 } 18101 18102 static bool has_pch_trancoder(struct drm_i915_private *dev_priv, 18103 enum pipe pch_transcoder) 18104 { 18105 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 18106 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A); 18107 } 18108 18109 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state) 18110 { 18111 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 18112 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 18113 enum transcoder 
cpu_transcoder = crtc_state->cpu_transcoder; 18114 18115 if (INTEL_GEN(dev_priv) >= 9 || 18116 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 18117 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); 18118 u32 val; 18119 18120 if (transcoder_is_dsi(cpu_transcoder)) 18121 return; 18122 18123 val = intel_de_read(dev_priv, reg); 18124 val &= ~HSW_FRAME_START_DELAY_MASK; 18125 val |= HSW_FRAME_START_DELAY(0); 18126 intel_de_write(dev_priv, reg, val); 18127 } else { 18128 i915_reg_t reg = PIPECONF(cpu_transcoder); 18129 u32 val; 18130 18131 val = intel_de_read(dev_priv, reg); 18132 val &= ~PIPECONF_FRAME_START_DELAY_MASK; 18133 val |= PIPECONF_FRAME_START_DELAY(0); 18134 intel_de_write(dev_priv, reg, val); 18135 } 18136 18137 if (!crtc_state->has_pch_encoder) 18138 return; 18139 18140 if (HAS_PCH_IBX(dev_priv)) { 18141 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe); 18142 u32 val; 18143 18144 val = intel_de_read(dev_priv, reg); 18145 val &= ~TRANS_FRAME_START_DELAY_MASK; 18146 val |= TRANS_FRAME_START_DELAY(0); 18147 intel_de_write(dev_priv, reg, val); 18148 } else { 18149 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc); 18150 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder); 18151 u32 val; 18152 18153 val = intel_de_read(dev_priv, reg); 18154 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 18155 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0); 18156 intel_de_write(dev_priv, reg, val); 18157 } 18158 } 18159 18160 static void intel_sanitize_crtc(struct intel_crtc *crtc, 18161 struct drm_modeset_acquire_ctx *ctx) 18162 { 18163 struct drm_device *dev = crtc->base.dev; 18164 struct drm_i915_private *dev_priv = to_i915(dev); 18165 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); 18166 18167 if (crtc_state->hw.active) { 18168 struct intel_plane *plane; 18169 18170 /* Clear any frame start delays that the BIOS left set for debugging */ 18171 intel_sanitize_frame_start_delay(crtc_state); 18172 18173 /* Disable everything but the primary plane */ 18174 for_each_intel_plane_on_crtc(dev, crtc, plane) { 18175 const struct intel_plane_state *plane_state = 18176 to_intel_plane_state(plane->base.state); 18177 18178 if (plane_state->uapi.visible && 18179 plane->base.type != DRM_PLANE_TYPE_PRIMARY) 18180 intel_plane_disable_noatomic(crtc, plane); 18181 } 18182 18183 /* 18184 * Disable any background color set by the BIOS, but enable the 18185 * gamma and CSC to match how we program our planes. 18186 */ 18187 if (INTEL_GEN(dev_priv) >= 9) 18188 intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe), 18189 SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE); 18190 } 18191 18192 /* Adjust the state of the output pipe according to whether we 18193 * have active connectors/encoders. */ 18194 if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc)) 18195 intel_crtc_disable_noatomic(crtc, ctx); 18196 18197 if (crtc_state->hw.active || HAS_GMCH(dev_priv)) { 18198 /* 18199 * We start out with underrun reporting disabled to avoid races. 18200 * For correct bookkeeping, mark this on active crtcs. 18201 * 18202 * Also, on gmch platforms we don't have any hardware bits to 18203 * disable the underrun reporting, which means we need to start 18204 * out with underrun reporting disabled also on inactive pipes, 18205 * since otherwise we'll complain about the garbage we read when 18206 * e.g. coming up after runtime pm. 18207 * 18208 * No protection against concurrent access is required - at 18209 * worst a fifo underrun happens which also sets this to false.
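 * (The flag set below is the one intel_set_cpu_fifo_underrun_reporting() consults and clears again when underrun reporting is re-enabled.)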
18210 */ 18211 crtc->cpu_fifo_underrun_disabled = true; 18212 /* 18213 * We track the PCH transcoder underrun reporting state 18214 * within the crtc: the crtc for pipe A houses the underrun 18215 * reporting state for PCH transcoder A, the crtc for pipe B 18216 * houses it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, 18217 * and marking underrun reporting as disabled for the non-existing 18218 * PCH transcoders B and C would prevent enabling the south 18219 * error interrupt (see cpt_can_enable_serr_int()). 18220 */ 18221 if (has_pch_trancoder(dev_priv, crtc->pipe)) 18222 crtc->pch_fifo_underrun_disabled = true; 18223 } 18224 } 18225 18226 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) 18227 { 18228 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 18229 18230 /* 18231 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram 18232 * the hardware when a high resolution display is plugged in. The DPLL P 18233 * divider is zero, and the pipe timings are bonkers. We'll 18234 * try to disable everything in that case. 18235 * 18236 * FIXME would be nice to be able to sanitize this state 18237 * without several WARNs, but for now let's take the easy 18238 * road. 18239 */ 18240 return IS_GEN(dev_priv, 6) && 18241 crtc_state->hw.active && 18242 crtc_state->shared_dpll && 18243 crtc_state->port_clock == 0; 18244 } 18245 18246 static void intel_sanitize_encoder(struct intel_encoder *encoder) 18247 { 18248 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 18249 struct intel_connector *connector; 18250 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 18251 struct intel_crtc_state *crtc_state = crtc ? 18252 to_intel_crtc_state(crtc->base.state) : NULL; 18253 18254 /* We need to check both for a crtc link (meaning that the 18255 * encoder is active and trying to read from a pipe) and the 18256 * pipe itself being active. */ 18257 bool has_active_crtc = crtc_state && 18258 crtc_state->hw.active; 18259 18260 if (crtc_state && has_bogus_dpll_config(crtc_state)) { 18261 drm_dbg_kms(&dev_priv->drm, 18262 "BIOS has misprogrammed the hardware. Disabling pipe %c\n", 18263 pipe_name(crtc->pipe)); 18264 has_active_crtc = false; 18265 } 18266 18267 connector = intel_encoder_find_connector(encoder); 18268 if (connector && !has_active_crtc) { 18269 drm_dbg_kms(&dev_priv->drm, 18270 "[ENCODER:%d:%s] has active connectors but no active pipe!\n", 18271 encoder->base.base.id, 18272 encoder->base.name); 18273 18274 /* Connector is active, but has no active pipe. This is 18275 * fallout from our resume register restoring. Disable 18276 * the encoder manually again. */ 18277 if (crtc_state) { 18278 struct drm_encoder *best_encoder; 18279 18280 drm_dbg_kms(&dev_priv->drm, 18281 "[ENCODER:%d:%s] manually disabled\n", 18282 encoder->base.base.id, 18283 encoder->base.name); 18284 18285 /* avoid oopsing in case the hooks consult best_encoder */ 18286 best_encoder = connector->base.state->best_encoder; 18287 connector->base.state->best_encoder = &encoder->base; 18288 18289 /* FIXME NULL atomic state passed!
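 * The disable hooks are assumed to tolerate a NULL atomic state here, since no atomic state exists during this early sanitization.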
*/ 18290 if (encoder->disable) 18291 encoder->disable(NULL, encoder, crtc_state, 18292 connector->base.state); 18293 if (encoder->post_disable) 18294 encoder->post_disable(NULL, encoder, crtc_state, 18295 connector->base.state); 18296 18297 connector->base.state->best_encoder = best_encoder; 18298 } 18299 encoder->base.crtc = NULL; 18300 18301 /* Inconsistent output/port/pipe state happens presumably due to 18302 * a bug in one of the get_hw_state functions. Or someplace else 18303 * in our code, like the register restore mess on resume. Clamp 18304 * things to off as a safer default. */ 18305 18306 connector->base.dpms = DRM_MODE_DPMS_OFF; 18307 connector->base.encoder = NULL; 18308 } 18309 18310 /* notify opregion of the sanitized encoder state */ 18311 intel_opregion_notify_encoder(encoder, connector && has_active_crtc); 18312 18313 if (INTEL_GEN(dev_priv) >= 11) 18314 icl_sanitize_encoder_pll_mapping(encoder); 18315 } 18316 18317 /* FIXME read out full plane state for all planes */ 18318 static void readout_plane_state(struct drm_i915_private *dev_priv) 18319 { 18320 struct intel_plane *plane; 18321 struct intel_crtc *crtc; 18322 18323 for_each_intel_plane(&dev_priv->drm, plane) { 18324 struct intel_plane_state *plane_state = 18325 to_intel_plane_state(plane->base.state); 18326 struct intel_crtc_state *crtc_state; 18327 enum pipe pipe = PIPE_A; 18328 bool visible; 18329 18330 visible = plane->get_hw_state(plane, &pipe); 18331 18332 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 18333 crtc_state = to_intel_crtc_state(crtc->base.state); 18334 18335 intel_set_plane_visible(crtc_state, plane_state, visible); 18336 18337 drm_dbg_kms(&dev_priv->drm, 18338 "[PLANE:%d:%s] hw state readout: %s, pipe %c\n", 18339 plane->base.base.id, plane->base.name, 18340 enableddisabled(visible), pipe_name(pipe)); 18341 } 18342 18343 for_each_intel_crtc(&dev_priv->drm, crtc) { 18344 struct intel_crtc_state *crtc_state = 18345 to_intel_crtc_state(crtc->base.state); 18346 18347 fixup_active_planes(crtc_state); 18348 } 18349 } 18350 18351 static void intel_modeset_readout_hw_state(struct drm_device *dev) 18352 { 18353 struct drm_i915_private *dev_priv = to_i915(dev); 18354 struct intel_cdclk_state *cdclk_state = 18355 to_intel_cdclk_state(dev_priv->cdclk.obj.state); 18356 struct intel_dbuf_state *dbuf_state = 18357 to_intel_dbuf_state(dev_priv->dbuf.obj.state); 18358 enum pipe pipe; 18359 struct intel_crtc *crtc; 18360 struct intel_encoder *encoder; 18361 struct intel_connector *connector; 18362 struct drm_connector_list_iter conn_iter; 18363 u8 active_pipes = 0; 18364 18365 for_each_intel_crtc(dev, crtc) { 18366 struct intel_crtc_state *crtc_state = 18367 to_intel_crtc_state(crtc->base.state); 18368 18369 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); 18370 intel_crtc_free_hw_state(crtc_state); 18371 intel_crtc_state_reset(crtc_state, crtc); 18372 18373 crtc_state->hw.active = crtc_state->hw.enable = 18374 dev_priv->display.get_pipe_config(crtc, crtc_state); 18375 18376 crtc->base.enabled = crtc_state->hw.enable; 18377 crtc->active = crtc_state->hw.active; 18378 18379 if (crtc_state->hw.active) 18380 active_pipes |= BIT(crtc->pipe); 18381 18382 drm_dbg_kms(&dev_priv->drm, 18383 "[CRTC:%d:%s] hw state readout: %s\n", 18384 crtc->base.base.id, crtc->base.name, 18385 enableddisabled(crtc_state->hw.active)); 18386 } 18387 18388 dev_priv->active_pipes = cdclk_state->active_pipes = 18389 dbuf_state->active_pipes = active_pipes; 18390 18391 readout_plane_state(dev_priv); 18392 18393 
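/* Read out the current hw state of the shared DPLLs */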
intel_dpll_readout_hw_state(dev_priv); 18394 18395 for_each_intel_encoder(dev, encoder) { 18396 pipe = 0; 18397 18398 if (encoder->get_hw_state(encoder, &pipe)) { 18399 struct intel_crtc_state *crtc_state; 18400 18401 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 18402 crtc_state = to_intel_crtc_state(crtc->base.state); 18403 18404 encoder->base.crtc = &crtc->base; 18405 encoder->get_config(encoder, crtc_state); 18406 } else { 18407 encoder->base.crtc = NULL; 18408 } 18409 18410 drm_dbg_kms(&dev_priv->drm, 18411 "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 18412 encoder->base.base.id, encoder->base.name, 18413 enableddisabled(encoder->base.crtc), 18414 pipe_name(pipe)); 18415 } 18416 18417 drm_connector_list_iter_begin(dev, &conn_iter); 18418 for_each_intel_connector_iter(connector, &conn_iter) { 18419 if (connector->get_hw_state(connector)) { 18420 struct intel_crtc_state *crtc_state; 18421 struct intel_crtc *crtc; 18422 18423 connector->base.dpms = DRM_MODE_DPMS_ON; 18424 18425 encoder = intel_attached_encoder(connector); 18426 connector->base.encoder = &encoder->base; 18427 18428 crtc = to_intel_crtc(encoder->base.crtc); 18429 crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL; 18430 18431 if (crtc_state && crtc_state->hw.active) { 18432 /* 18433 * This has to be done during hardware readout 18434 * because anything calling .crtc_disable may 18435 * rely on the connector_mask being accurate. 18436 */ 18437 crtc_state->uapi.connector_mask |= 18438 drm_connector_mask(&connector->base); 18439 crtc_state->uapi.encoder_mask |= 18440 drm_encoder_mask(&encoder->base); 18441 } 18442 } else { 18443 connector->base.dpms = DRM_MODE_DPMS_OFF; 18444 connector->base.encoder = NULL; 18445 } 18446 drm_dbg_kms(&dev_priv->drm, 18447 "[CONNECTOR:%d:%s] hw state readout: %s\n", 18448 connector->base.base.id, connector->base.name, 18449 enableddisabled(connector->base.encoder)); 18450 } 18451 drm_connector_list_iter_end(&conn_iter); 18452 18453 for_each_intel_crtc(dev, crtc) { 18454 struct intel_bw_state *bw_state = 18455 to_intel_bw_state(dev_priv->bw_obj.state); 18456 struct intel_crtc_state *crtc_state = 18457 to_intel_crtc_state(crtc->base.state); 18458 struct intel_plane *plane; 18459 int min_cdclk = 0; 18460 18461 if (crtc_state->hw.active) { 18462 struct drm_display_mode *mode = &crtc_state->hw.mode; 18463 18464 intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode, 18465 crtc_state); 18466 18467 *mode = crtc_state->hw.adjusted_mode; 18468 mode->hdisplay = crtc_state->pipe_src_w; 18469 mode->vdisplay = crtc_state->pipe_src_h; 18470 18471 /* 18472 * The initial mode needs to be set in order to keep 18473 * the atomic core happy. It wants a valid mode if the 18474 * crtc's enabled, so we do the above call. 18475 * 18476 * But we don't set all the derived state fully, hence 18477 * set a flag to indicate that a full recalculation is 18478 * needed on the next commit. 
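 * (That recalculation request is what the inherited flag set just below indicates.)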
18479 */ 18480 crtc_state->inherited = true; 18481 18482 intel_crtc_compute_pixel_rate(crtc_state); 18483 18484 intel_crtc_update_active_timings(crtc_state); 18485 18486 intel_crtc_copy_hw_to_uapi_state(crtc_state); 18487 } 18488 18489 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 18490 const struct intel_plane_state *plane_state = 18491 to_intel_plane_state(plane->base.state); 18492 18493 /* 18494 * FIXME don't have the fb yet, so can't 18495 * use intel_plane_data_rate() :( 18496 */ 18497 if (plane_state->uapi.visible) 18498 crtc_state->data_rate[plane->id] = 18499 4 * crtc_state->pixel_rate; 18500 /* 18501 * FIXME don't have the fb yet, so can't 18502 * use plane->min_cdclk() :( 18503 */ 18504 if (plane_state->uapi.visible && plane->min_cdclk) { 18505 if (crtc_state->double_wide || 18506 INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 18507 crtc_state->min_cdclk[plane->id] = 18508 DIV_ROUND_UP(crtc_state->pixel_rate, 2); 18509 else 18510 crtc_state->min_cdclk[plane->id] = 18511 crtc_state->pixel_rate; 18512 } 18513 drm_dbg_kms(&dev_priv->drm, 18514 "[PLANE:%d:%s] min_cdclk %d kHz\n", 18515 plane->base.base.id, plane->base.name, 18516 crtc_state->min_cdclk[plane->id]); 18517 } 18518 18519 if (crtc_state->hw.active) { 18520 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); 18521 if (drm_WARN_ON(dev, min_cdclk < 0)) 18522 min_cdclk = 0; 18523 } 18524 18525 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk; 18526 cdclk_state->min_voltage_level[crtc->pipe] = 18527 crtc_state->min_voltage_level; 18528 18529 intel_bw_crtc_update(bw_state, crtc_state); 18530 18531 intel_pipe_config_sanity_check(dev_priv, crtc_state); 18532 } 18533 } 18534 18535 static void 18536 get_encoder_power_domains(struct drm_i915_private *dev_priv) 18537 { 18538 struct intel_encoder *encoder; 18539 18540 for_each_intel_encoder(&dev_priv->drm, encoder) { 18541 struct intel_crtc_state *crtc_state; 18542 18543 if (!encoder->get_power_domains) 18544 continue; 18545 18546 /* 18547 * MST-primary and inactive encoders don't have a crtc state 18548 * and neither of these requires any power domain references. 18549 */ 18550 if (!encoder->base.crtc) 18551 continue; 18552 18553 crtc_state = to_intel_crtc_state(encoder->base.crtc->state); 18554 encoder->get_power_domains(encoder, crtc_state); 18555 } 18556 } 18557 18558 static void intel_early_display_was(struct drm_i915_private *dev_priv) 18559 { 18560 /* 18561 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl 18562 * Also known as Wa_14010480278. 18563 */ 18564 if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv)) 18565 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0, 18566 intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS); 18567 18568 if (IS_HASWELL(dev_priv)) { 18569 /* 18570 * WaRsPkgCStateDisplayPMReq:hsw 18571 * System hang if this isn't done before disabling all planes!
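 * (intel_early_display_was() runs before the hw state readout and sanitization, so this workaround is applied in time.)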
18572 */ 18573 intel_de_write(dev_priv, CHICKEN_PAR1_1, 18574 intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); 18575 } 18576 } 18577 18578 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv, 18579 enum port port, i915_reg_t hdmi_reg) 18580 { 18581 u32 val = intel_de_read(dev_priv, hdmi_reg); 18582 18583 if (val & SDVO_ENABLE || 18584 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A)) 18585 return; 18586 18587 drm_dbg_kms(&dev_priv->drm, 18588 "Sanitizing transcoder select for HDMI %c\n", 18589 port_name(port)); 18590 18591 val &= ~SDVO_PIPE_SEL_MASK; 18592 val |= SDVO_PIPE_SEL(PIPE_A); 18593 18594 intel_de_write(dev_priv, hdmi_reg, val); 18595 } 18596 18597 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv, 18598 enum port port, i915_reg_t dp_reg) 18599 { 18600 u32 val = intel_de_read(dev_priv, dp_reg); 18601 18602 if (val & DP_PORT_EN || 18603 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A)) 18604 return; 18605 18606 drm_dbg_kms(&dev_priv->drm, 18607 "Sanitizing transcoder select for DP %c\n", 18608 port_name(port)); 18609 18610 val &= ~DP_PIPE_SEL_MASK; 18611 val |= DP_PIPE_SEL(PIPE_A); 18612 18613 intel_de_write(dev_priv, dp_reg, val); 18614 } 18615 18616 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv) 18617 { 18618 /* 18619 * The BIOS may select transcoder B on some of the PCH 18620 * ports even if it doesn't enable the port. This would trip 18621 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled(). 18622 * Sanitize the transcoder select bits to prevent that. We 18623 * assume that the BIOS never actually enabled the port, 18624 * because if it did we'd actually have to toggle the port 18625 * on and back off to make the transcoder A select stick 18626 * (see intel_dp_link_down(), intel_disable_hdmi(), 18627 * intel_disable_sdvo()). 18628 */ 18629 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B); 18630 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C); 18631 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D); 18632 18633 /* PCH SDVOB is multiplexed with HDMIB */ 18634 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB); 18635 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC); 18636 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID); 18637 } 18638 18639 /* Scan out the current hw modeset state, 18640 * and sanitize it to the current state. 18641 */ 18642 static void 18643 intel_modeset_setup_hw_state(struct drm_device *dev, 18644 struct drm_modeset_acquire_ctx *ctx) 18645 { 18646 struct drm_i915_private *dev_priv = to_i915(dev); 18647 struct intel_encoder *encoder; 18648 struct intel_crtc *crtc; 18649 intel_wakeref_t wakeref; 18650 18651 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 18652 18653 intel_early_display_was(dev_priv); 18654 intel_modeset_readout_hw_state(dev); 18655 18656 /* HW state is read out, now we need to sanitize this mess. */ 18657 18658 /* Sanitize the TypeC port mode upfront, encoders depend on this */ 18659 for_each_intel_encoder(dev, encoder) { 18660 enum phy phy = intel_port_to_phy(dev_priv, encoder->port); 18661 18662 /* We need to sanitize only the MST primary port.
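 * Fake MST stream encoders have type INTEL_OUTPUT_DP_MST and are skipped by the check below.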
*/ 18663 if (encoder->type != INTEL_OUTPUT_DP_MST && 18664 intel_phy_is_tc(dev_priv, phy)) 18665 intel_tc_port_sanitize(enc_to_dig_port(encoder)); 18666 } 18667 18668 get_encoder_power_domains(dev_priv); 18669 18670 if (HAS_PCH_IBX(dev_priv)) 18671 ibx_sanitize_pch_ports(dev_priv); 18672 18673 /* 18674 * intel_sanitize_plane_mapping() may need to do vblank 18675 * waits, so we need vblank interrupts restored beforehand. 18676 */ 18677 for_each_intel_crtc(&dev_priv->drm, crtc) { 18678 struct intel_crtc_state *crtc_state = 18679 to_intel_crtc_state(crtc->base.state); 18680 18681 drm_crtc_vblank_reset(&crtc->base); 18682 18683 if (crtc_state->hw.active) 18684 intel_crtc_vblank_on(crtc_state); 18685 } 18686 18687 intel_sanitize_plane_mapping(dev_priv); 18688 18689 for_each_intel_encoder(dev, encoder) 18690 intel_sanitize_encoder(encoder); 18691 18692 for_each_intel_crtc(&dev_priv->drm, crtc) { 18693 struct intel_crtc_state *crtc_state = 18694 to_intel_crtc_state(crtc->base.state); 18695 18696 intel_sanitize_crtc(crtc, ctx); 18697 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]"); 18698 } 18699 18700 intel_modeset_update_connector_atomic_state(dev); 18701 18702 intel_dpll_sanitize_state(dev_priv); 18703 18704 if (IS_G4X(dev_priv)) { 18705 g4x_wm_get_hw_state(dev_priv); 18706 g4x_wm_sanitize(dev_priv); 18707 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 18708 vlv_wm_get_hw_state(dev_priv); 18709 vlv_wm_sanitize(dev_priv); 18710 } else if (INTEL_GEN(dev_priv) >= 9) { 18711 skl_wm_get_hw_state(dev_priv); 18712 } else if (HAS_PCH_SPLIT(dev_priv)) { 18713 ilk_wm_get_hw_state(dev_priv); 18714 } 18715 18716 for_each_intel_crtc(dev, crtc) { 18717 struct intel_crtc_state *crtc_state = 18718 to_intel_crtc_state(crtc->base.state); 18719 u64 put_domains; 18720 18721 put_domains = modeset_get_crtc_power_domains(crtc_state); 18722 if (drm_WARN_ON(dev, put_domains)) 18723 modeset_put_power_domains(dev_priv, put_domains); 18724 } 18725 18726 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); 18727 } 18728 18729 void intel_display_resume(struct drm_device *dev) 18730 { 18731 struct drm_i915_private *dev_priv = to_i915(dev); 18732 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 18733 struct drm_modeset_acquire_ctx ctx; 18734 int ret; 18735 18736 dev_priv->modeset_restore_state = NULL; 18737 if (state) 18738 state->acquire_ctx = &ctx; 18739 18740 drm_modeset_acquire_init(&ctx, 0); 18741 18742 while (1) { 18743 ret = drm_modeset_lock_all_ctx(dev, &ctx); 18744 if (ret != -EDEADLK) 18745 break; 18746 18747 drm_modeset_backoff(&ctx); 18748 } 18749 18750 if (!ret) 18751 ret = __intel_display_resume(dev, state, &ctx); 18752 18753 intel_enable_ipc(dev_priv); 18754 drm_modeset_drop_locks(&ctx); 18755 drm_modeset_acquire_fini(&ctx); 18756 18757 if (ret) 18758 drm_err(&dev_priv->drm, 18759 "Restoring old state failed with %i\n", ret); 18760 if (state) 18761 drm_atomic_state_put(state); 18762 } 18763 18764 static void intel_hpd_poll_fini(struct drm_i915_private *i915) 18765 { 18766 struct intel_connector *connector; 18767 struct drm_connector_list_iter conn_iter; 18768 18769 /* Kill all the work that may have been queued by hpd. 
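 * That is the modeset retry work, plus the HDCP check/prop work for connectors that have an HDCP shim.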
*/ 18770 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 18771 for_each_intel_connector_iter(connector, &conn_iter) { 18772 if (connector->modeset_retry_work.func) 18773 cancel_work_sync(&connector->modeset_retry_work); 18774 if (connector->hdcp.shim) { 18775 cancel_delayed_work_sync(&connector->hdcp.check_work); 18776 cancel_work_sync(&connector->hdcp.prop_work); 18777 } 18778 } 18779 drm_connector_list_iter_end(&conn_iter); 18780 } 18781 18782 /* part #1: call before irq uninstall */ 18783 void intel_modeset_driver_remove(struct drm_i915_private *i915) 18784 { 18785 flush_workqueue(i915->flip_wq); 18786 flush_workqueue(i915->modeset_wq); 18787 18788 flush_work(&i915->atomic_helper.free_work); 18789 drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list)); 18790 } 18791 18792 /* part #2: call after irq uninstall */ 18793 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) 18794 { 18795 /* 18796 * Due to the hpd irq storm handling, the hotplug work can re-arm the 18797 * poll handlers. Hence disable polling after hpd handling is shut down. 18798 */ 18799 intel_hpd_poll_fini(i915); 18800 18801 /* 18802 * MST topology needs to be suspended so we don't have any calls to 18803 * fbdev after it's finalized. MST will be destroyed later as part of 18804 * drm_mode_config_cleanup(). 18805 */ 18806 intel_dp_mst_suspend(i915); 18807 18808 /* poll work can call into fbdev, hence clean that up afterwards */ 18809 intel_fbdev_fini(i915); 18810 18811 intel_unregister_dsm_handler(); 18812 18813 intel_fbc_global_disable(i915); 18814 18815 /* flush any delayed tasks or pending work */ 18816 flush_scheduled_work(); 18817 18818 intel_hdcp_component_fini(i915); 18819 18820 intel_mode_config_cleanup(i915); 18821 18822 intel_overlay_cleanup(i915); 18823 18824 intel_gmbus_teardown(i915); 18825 18826 destroy_workqueue(i915->flip_wq); 18827 destroy_workqueue(i915->modeset_wq); 18828 18829 intel_fbc_cleanup_cfb(i915); 18830 } 18831 18832 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 18833 18834 struct intel_display_error_state { 18835 18836 u32 power_well_driver; 18837 18838 struct intel_cursor_error_state { 18839 u32 control; 18840 u32 position; 18841 u32 base; 18842 u32 size; 18843 } cursor[I915_MAX_PIPES]; 18844 18845 struct intel_pipe_error_state { 18846 bool power_domain_on; 18847 u32 source; 18848 u32 stat; 18849 } pipe[I915_MAX_PIPES]; 18850 18851 struct intel_plane_error_state { 18852 u32 control; 18853 u32 stride; 18854 u32 size; 18855 u32 pos; 18856 u32 addr; 18857 u32 surface; 18858 u32 tile_offset; 18859 } plane[I915_MAX_PIPES]; 18860 18861 struct intel_transcoder_error_state { 18862 bool available; 18863 bool power_domain_on; 18864 enum transcoder cpu_transcoder; 18865 18866 u32 conf; 18867 18868 u32 htotal; 18869 u32 hblank; 18870 u32 hsync; 18871 u32 vtotal; 18872 u32 vblank; 18873 u32 vsync; 18874 } transcoder[5]; 18875 }; 18876 18877 struct intel_display_error_state * 18878 intel_display_capture_error_state(struct drm_i915_private *dev_priv) 18879 { 18880 struct intel_display_error_state *error; 18881 int transcoders[] = { 18882 TRANSCODER_A, 18883 TRANSCODER_B, 18884 TRANSCODER_C, 18885 TRANSCODER_D, 18886 TRANSCODER_EDP, 18887 }; 18888 int i; 18889 18890 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder)); 18891 18892 if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) 18893 return NULL; 18894 18895 error = kzalloc(sizeof(*error), GFP_ATOMIC); 18896 if (error == NULL) 18897 return NULL; 18898 18899 if (IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) 18900 error->power_well_driver = intel_de_read(dev_priv, 18901 HSW_PWR_WELL_CTL2); 18902 18903 for_each_pipe(dev_priv, i) { 18904 error->pipe[i].power_domain_on = 18905 __intel_display_power_is_enabled(dev_priv, 18906 POWER_DOMAIN_PIPE(i)); 18907 if (!error->pipe[i].power_domain_on) 18908 continue; 18909 18910 error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i)); 18911 error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i)); 18912 error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i)); 18913 18914 error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i)); 18915 error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i)); 18916 if (INTEL_GEN(dev_priv) <= 3) { 18917 error->plane[i].size = intel_de_read(dev_priv, 18918 DSPSIZE(i)); 18919 error->plane[i].pos = intel_de_read(dev_priv, 18920 DSPPOS(i)); 18921 } 18922 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 18923 error->plane[i].addr = intel_de_read(dev_priv, 18924 DSPADDR(i)); 18925 if (INTEL_GEN(dev_priv) >= 4) { 18926 error->plane[i].surface = intel_de_read(dev_priv, 18927 DSPSURF(i)); 18928 error->plane[i].tile_offset = intel_de_read(dev_priv, 18929 DSPTILEOFF(i)); 18930 } 18931 18932 error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i)); 18933 18934 if (HAS_GMCH(dev_priv)) 18935 error->pipe[i].stat = intel_de_read(dev_priv, 18936 PIPESTAT(i)); 18937 } 18938 18939 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { 18940 enum transcoder cpu_transcoder = transcoders[i]; 18941 18942 if (!HAS_TRANSCODER(dev_priv, cpu_transcoder)) 18943 continue; 18944 18945 error->transcoder[i].available = true; 18946 error->transcoder[i].power_domain_on = 18947 __intel_display_power_is_enabled(dev_priv, 18948 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 18949 if (!error->transcoder[i].power_domain_on) 18950 continue; 18951 18952 error->transcoder[i].cpu_transcoder = cpu_transcoder; 18953 18954 error->transcoder[i].conf = intel_de_read(dev_priv, 18955 PIPECONF(cpu_transcoder)); 18956 error->transcoder[i].htotal = intel_de_read(dev_priv, 18957 HTOTAL(cpu_transcoder)); 18958 error->transcoder[i].hblank = intel_de_read(dev_priv, 18959 HBLANK(cpu_transcoder)); 18960 error->transcoder[i].hsync = intel_de_read(dev_priv, 18961 HSYNC(cpu_transcoder)); 18962 error->transcoder[i].vtotal = intel_de_read(dev_priv, 18963 VTOTAL(cpu_transcoder)); 18964 error->transcoder[i].vblank = intel_de_read(dev_priv, 18965 VBLANK(cpu_transcoder)); 18966 error->transcoder[i].vsync = intel_de_read(dev_priv, 18967 VSYNC(cpu_transcoder)); 18968 } 18969 18970 return error; 18971 } 18972 18973 #define err_printf(e, ...) 
i915_error_printf(e, __VA_ARGS__) 18974 18975 void 18976 intel_display_print_error_state(struct drm_i915_error_state_buf *m, 18977 struct intel_display_error_state *error) 18978 { 18979 struct drm_i915_private *dev_priv = m->i915; 18980 int i; 18981 18982 if (!error) 18983 return; 18984 18985 err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv)); 18986 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 18987 err_printf(m, "PWR_WELL_CTL2: %08x\n", 18988 error->power_well_driver); 18989 for_each_pipe(dev_priv, i) { 18990 err_printf(m, "Pipe [%d]:\n", i); 18991 err_printf(m, " Power: %s\n", 18992 onoff(error->pipe[i].power_domain_on)); 18993 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 18994 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 18995 18996 err_printf(m, "Plane [%d]:\n", i); 18997 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 18998 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 18999 if (INTEL_GEN(dev_priv) <= 3) { 19000 err_printf(m, " SIZE: %08x\n", error->plane[i].size); 19001 err_printf(m, " POS: %08x\n", error->plane[i].pos); 19002 } 19003 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 19004 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 19005 if (INTEL_GEN(dev_priv) >= 4) { 19006 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 19007 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 19008 } 19009 19010 err_printf(m, "Cursor [%d]:\n", i); 19011 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 19012 err_printf(m, " POS: %08x\n", error->cursor[i].position); 19013 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 19014 } 19015 19016 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { 19017 if (!error->transcoder[i].available) 19018 continue; 19019 19020 err_printf(m, "CPU transcoder: %s\n", 19021 transcoder_name(error->transcoder[i].cpu_transcoder)); 19022 err_printf(m, " Power: %s\n", 19023 onoff(error->transcoder[i].power_domain_on)); 19024 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 19025 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 19026 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 19027 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 19028 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 19029 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 19030 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 19031 } 19032 } 19033 19034 #endif 19035