1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);

/*
 * Per-platform valid ranges for the DPLL divider values used by the
 * find_best_dpll() routines below. All members are min/max pairs; p2
 * additionally carries the dot-clock threshold that selects between
 * its slow and fast values.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

/*
 * Read a CCK clock divider register and derive the clock rate (in kHz)
 * from @ref_freq. Warns if the hardware reports a divider change still
 * in progress, in which case the returned rate may be stale.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

/*
 * Same as vlv_get_cck_clock() but uses the (lazily cached) HPLL VCO
 * frequency as the reference clock. Takes the CCK sideband lock.
 */
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

/* Cache the CZ clock rate; only meaningful on VLV/CHV. */
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

/* units of 100MHz */
static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits.
 */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	/* Toggle the DUPS1/DUPS2 clock gating disable bits for the pipe. */
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	/* Toggle the DPFR clock gating disable bit for the pipe. */
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

/* Does this crtc state require a full modeset? */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}

/* A sync slave has a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

/* A sync master has at least one slave in its mask. */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	/* VLV dividers operate on the fast (5x) clock. */
	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	/* CHV m2 is a 2.22 fixed point value, hence the n << 22 scaling. */
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		return false;
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		return false;
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		return false;
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		return false;

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			return false;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			return false;
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			return false;
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		return false;
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		return false;

	return true;
}

/* Pick the slow or fast p2 divider for the given output and target clock. */
static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustive search over m1/m2/n/p1, keeping the lowest dot error. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* hw constraint on these platforms: m1 > m2 */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* True iff at least one candidate beat the initial error bound. */
	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Like i9xx_find_best_dpll() but without the m1 > m2 constraint,
	 * since Pineview's m1 is reserved as 0.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* lock in the smaller n found so far */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
881 */ 882 if (IS_CHERRYVIEW(to_i915(dev))) { 883 *error_ppm = 0; 884 885 return calculated_clock->p > best_clock->p; 886 } 887 888 if (drm_WARN_ON_ONCE(dev, !target_freq)) 889 return false; 890 891 *error_ppm = div_u64(1000000ULL * 892 abs(target_freq - calculated_clock->dot), 893 target_freq); 894 /* 895 * Prefer a better P value over a better (smaller) error if the error 896 * is small. Ensure this preference for future configurations too by 897 * setting the error to 0. 898 */ 899 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { 900 *error_ppm = 0; 901 902 return true; 903 } 904 905 return *error_ppm + 10 < best_error_ppm; 906 } 907 908 /* 909 * Returns a set of divisors for the desired target clock with the given 910 * refclk, or FALSE. The returned values represent the clock equation: 911 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 912 */ 913 static bool 914 vlv_find_best_dpll(const struct intel_limit *limit, 915 struct intel_crtc_state *crtc_state, 916 int target, int refclk, struct dpll *match_clock, 917 struct dpll *best_clock) 918 { 919 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 920 struct drm_device *dev = crtc->base.dev; 921 struct dpll clock; 922 unsigned int bestppm = 1000000; 923 /* min update 19.2 MHz */ 924 int max_n = min(limit->n.max, refclk / 19200); 925 bool found = false; 926 927 target *= 5; /* fast clock */ 928 929 memset(best_clock, 0, sizeof(*best_clock)); 930 931 /* based on hardware requirement, prefer smaller n to precision */ 932 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 933 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 934 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; 935 clock.p2 -= clock.p2 > 10 ? 
2 : 1) { 936 clock.p = clock.p1 * clock.p2; 937 /* based on hardware requirement, prefer bigger m1,m2 values */ 938 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 939 unsigned int ppm; 940 941 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 942 refclk * clock.m1); 943 944 vlv_calc_dpll_params(refclk, &clock); 945 946 if (!intel_pll_is_valid(to_i915(dev), 947 limit, 948 &clock)) 949 continue; 950 951 if (!vlv_PLL_is_optimal(dev, target, 952 &clock, 953 best_clock, 954 bestppm, &ppm)) 955 continue; 956 957 *best_clock = clock; 958 bestppm = ppm; 959 found = true; 960 } 961 } 962 } 963 } 964 965 return found; 966 } 967 968 /* 969 * Returns a set of divisors for the desired target clock with the given 970 * refclk, or FALSE. The returned values represent the clock equation: 971 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 972 */ 973 static bool 974 chv_find_best_dpll(const struct intel_limit *limit, 975 struct intel_crtc_state *crtc_state, 976 int target, int refclk, struct dpll *match_clock, 977 struct dpll *best_clock) 978 { 979 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 980 struct drm_device *dev = crtc->base.dev; 981 unsigned int best_error_ppm; 982 struct dpll clock; 983 u64 m2; 984 int found = false; 985 986 memset(best_clock, 0, sizeof(*best_clock)); 987 best_error_ppm = 1000000; 988 989 /* 990 * Based on hardware doc, the n always set to 1, and m1 always 991 * set to 2. If requires to support 200Mhz refclk, we need to 992 * revisit this because n may not 1 anymore. 993 */ 994 clock.n = 1, clock.m1 = 2; 995 target *= 5; /* fast clock */ 996 997 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 998 for (clock.p2 = limit->p2.p2_fast; 999 clock.p2 >= limit->p2.p2_slow; 1000 clock.p2 -= clock.p2 > 10 ? 
2 : 1) { 1001 unsigned int error_ppm; 1002 1003 clock.p = clock.p1 * clock.p2; 1004 1005 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22, 1006 refclk * clock.m1); 1007 1008 if (m2 > INT_MAX/clock.m1) 1009 continue; 1010 1011 clock.m2 = m2; 1012 1013 chv_calc_dpll_params(refclk, &clock); 1014 1015 if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) 1016 continue; 1017 1018 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, 1019 best_error_ppm, &error_ppm)) 1020 continue; 1021 1022 *best_clock = clock; 1023 best_error_ppm = error_ppm; 1024 found = true; 1025 } 1026 } 1027 1028 return found; 1029 } 1030 1031 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, 1032 struct dpll *best_clock) 1033 { 1034 int refclk = 100000; 1035 const struct intel_limit *limit = &intel_limits_bxt; 1036 1037 return chv_find_best_dpll(limit, crtc_state, 1038 crtc_state->port_clock, refclk, 1039 NULL, best_clock); 1040 } 1041 1042 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, 1043 enum pipe pipe) 1044 { 1045 i915_reg_t reg = PIPEDSL(pipe); 1046 u32 line1, line2; 1047 u32 line_mask; 1048 1049 if (IS_GEN(dev_priv, 2)) 1050 line_mask = DSL_LINEMASK_GEN2; 1051 else 1052 line_mask = DSL_LINEMASK_GEN3; 1053 1054 line1 = intel_de_read(dev_priv, reg) & line_mask; 1055 msleep(5); 1056 line2 = intel_de_read(dev_priv, reg) & line_mask; 1057 1058 return line1 != line2; 1059 } 1060 1061 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) 1062 { 1063 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1064 enum pipe pipe = crtc->pipe; 1065 1066 /* Wait for the display line to settle/start moving */ 1067 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) 1068 drm_err(&dev_priv->drm, 1069 "pipe %c scanline %s wait timed out\n", 1070 pipe_name(pipe), onoff(state)); 1071 } 1072 1073 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc) 1074 { 1075 
/*
 * intel_wait_for_pipe_off - wait for a just-disabled pipe to actually stop
 * @old_crtc_state: state of the crtc being shut down
 *
 * Gen4+ hardware exposes a pipe-active status bit in PIPECONF, so poll for
 * I965_PIPECONF_ACTIVE to clear (timeout 100 — presumably ms, per the
 * intel_de_wait_* helper convention; confirm).  Older gens have no such
 * status bit, so fall back to waiting for the scanline counter to stop.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
specific FDI_TX register. 1141 * 1142 * FDI is never fed from EDP transcoder 1143 * so pipe->transcoder cast is fine here. 1144 */ 1145 enum transcoder cpu_transcoder = (enum transcoder)pipe; 1146 u32 val = intel_de_read(dev_priv, 1147 TRANS_DDI_FUNC_CTL(cpu_transcoder)); 1148 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1149 } else { 1150 u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe)); 1151 cur_state = !!(val & FDI_TX_ENABLE); 1152 } 1153 I915_STATE_WARN(cur_state != state, 1154 "FDI TX state assertion failure (expected %s, current %s)\n", 1155 onoff(state), onoff(cur_state)); 1156 } 1157 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 1158 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 1159 1160 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1161 enum pipe pipe, bool state) 1162 { 1163 u32 val; 1164 bool cur_state; 1165 1166 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); 1167 cur_state = !!(val & FDI_RX_ENABLE); 1168 I915_STATE_WARN(cur_state != state, 1169 "FDI RX state assertion failure (expected %s, current %s)\n", 1170 onoff(state), onoff(cur_state)); 1171 } 1172 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 1173 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 1174 1175 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1176 enum pipe pipe) 1177 { 1178 u32 val; 1179 1180 /* ILK FDI PLL is always enabled */ 1181 if (IS_GEN(dev_priv, 5)) 1182 return; 1183 1184 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1185 if (HAS_DDI(dev_priv)) 1186 return; 1187 1188 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe)); 1189 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1190 } 1191 1192 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, 1193 enum pipe pipe, bool state) 1194 { 1195 u32 val; 1196 bool cur_state; 1197 1198 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); 1199 
/*
 * assert_panel_unlocked - WARN if the panel power sequencer could be
 * blocking register writes for @pipe.
 *
 * Figures out which pipe the panel power sequencer is attached to — via
 * the PP_ON_DELAYS port-select bits on PCH platforms and on pre-PCH
 * non-VLV/CHV (where only LVDS is expected), or directly by pipe on
 * VLV/CHV — then warns if that pipe is @pipe while the PP registers are
 * powered on and still locked.  Not applicable on DDI platforms (warned
 * and skipped).
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms don't use this panel lock scheme. */
	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Map the selected panel port to the pipe driving it. */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Only LVDS is expected on these platforms. */
		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Unlocked == panel power off, or the unlock bit pattern set. */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}
/*
 * assert_pipe - WARN unless @cpu_transcoder's enable state matches @state.
 *
 * Reads PIPECONF_ENABLE under a conditional power-domain reference: if the
 * transcoder's power well is not already up, the hardware is not touched
 * and the transcoder is reported as off.  On I830 both pipes are kept
 * enabled permanently, so the expected state is forced to true there.
 */
void
assert_pipe(struct drm_i915_private *dev_priv,
	    enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* Power domain down -> the transcoder cannot be enabled. */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
/*
 * assert_pch_dp_disabled - WARN if a PCH DP port is still driving @pipe.
 *
 * Two checks: the port must not be enabled on @pipe, and on IBX even a
 * disabled port must not be left with its pipe-select pointing at
 * transcoder B (the second WARN fires for !state && PIPE_B — presumably
 * an IBX hardware quirk; confirm against the IBX workaround notes).
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
/*
 * vlv_enable_pll - enable the DPLL for @crtc on VLV.
 *
 * The pipe must be off and the panel unlocked (the PLL registers are
 * write-protected by the panel power sequencer lock).  The actual VCO
 * enable-and-lock sequence lives in _vlv_enable_pll() and is skipped
 * when the computed DPLL value does not set DPLL_VCO_ENABLE; DPLL_MD is
 * programmed unconditionally.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
/*
 * chv_enable_pll - enable the DPLL for @crtc on CHV.
 *
 * Same preconditions as vlv_enable_pll() (pipe off, panel unlocked), and
 * the VCO sequence in _chv_enable_pll() is likewise skipped unless
 * DPLL_VCO_ENABLE is requested.  Programming DPLL_MD differs for pipes
 * B/C: per WaPixelRepeatModeFixForC0 the per-pipe MD register cannot be
 * written directly, so the value is staged through DPLL_MD(PIPE_B) with
 * the CBR4 chicken bits routing it to the target pipe, and the written
 * value is cached in dev_priv->chv_dpll_md[] for later cross-checking.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		/* Pipe A's MD register can be written directly. */
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}
1542 */ 1543 intel_de_write(dev_priv, reg, dpll); 1544 } 1545 1546 /* We do this three times for luck */ 1547 for (i = 0; i < 3; i++) { 1548 intel_de_write(dev_priv, reg, dpll); 1549 intel_de_posting_read(dev_priv, reg); 1550 udelay(150); /* wait for warmup */ 1551 } 1552 } 1553 1554 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) 1555 { 1556 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1557 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1558 enum pipe pipe = crtc->pipe; 1559 1560 /* Don't disable pipe or pipe PLLs if needed */ 1561 if (IS_I830(dev_priv)) 1562 return; 1563 1564 /* Make sure the pipe isn't still relying on us */ 1565 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); 1566 1567 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); 1568 intel_de_posting_read(dev_priv, DPLL(pipe)); 1569 } 1570 1571 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1572 { 1573 u32 val; 1574 1575 /* Make sure the pipe isn't still relying on us */ 1576 assert_pipe_disabled(dev_priv, (enum transcoder)pipe); 1577 1578 val = DPLL_INTEGRATED_REF_CLK_VLV | 1579 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1580 if (pipe != PIPE_A) 1581 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1582 1583 intel_de_write(dev_priv, DPLL(pipe), val); 1584 intel_de_posting_read(dev_priv, DPLL(pipe)); 1585 } 1586 1587 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1588 { 1589 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1590 u32 val; 1591 1592 /* Make sure the pipe isn't still relying on us */ 1593 assert_pipe_disabled(dev_priv, (enum transcoder)pipe); 1594 1595 val = DPLL_SSC_REF_CLK_CHV | 1596 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1597 if (pipe != PIPE_A) 1598 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1599 1600 intel_de_write(dev_priv, DPLL(pipe), val); 1601 intel_de_posting_read(dev_priv, DPLL(pipe)); 1602 1603 vlv_dpio_get(dev_priv); 1604 1605 /* Disable 10bit clock 
/*
 * ilk_enable_pch_transcoder - enable the PCH transcoder feeding @crtc.
 *
 * Preconditions (asserted): the shared PCH DPLL is running and both FDI
 * TX and RX for the pipe are enabled.  On CPT the timing-override chicken
 * bit must be set (and frame start delay programmed) before enabling the
 * transcoder.  On IBX the transcoder's frame start delay and BPC are made
 * consistent with the CPU pipe's PIPECONF — HDMI always uses the 8bpc
 * encoding here, even for 12bpc output.  The interlace mode is derived
 * from PIPECONF, with IBX+SDVO requiring the legacy interlaced encoding.
 * Finally TRANS_ENABLE is set and we poll for the state bit.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
/*
 * ilk_disable_pch_transcoder - disable the PCH transcoder for @pipe.
 *
 * FDI and all PCH ports must already be off (asserted) since they depend
 * on the transcoder.  After clearing TRANS_ENABLE we poll for the state
 * bit to drop, then on CPT undo the timing-override chicken bit that
 * ilk_enable_pch_transcoder() set.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
/*
 * intel_enable_pipe - enable the CPU pipe/transcoder for @new_crtc_state.
 *
 * Asserts that all planes are off, and that whichever clock feeds the
 * pipe is running (DSI PLL or pipe DPLL on GMCH platforms; FDI RX/TX
 * PLLs when driving a PCH encoder), then sets PIPECONF_ENABLE.  Finding
 * the pipe already enabled is only legal on i830, which keeps both pipes
 * on permanently.  When there is no usable hardware frame counter we
 * wait for the scanline counter to start moving so vblank timestamps are
 * sane before drm_crtc_vblank_on() is called.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1915 */ 1916 assert_planes_disabled(crtc); 1917 1918 trace_intel_pipe_disable(crtc); 1919 1920 reg = PIPECONF(cpu_transcoder); 1921 val = intel_de_read(dev_priv, reg); 1922 if ((val & PIPECONF_ENABLE) == 0) 1923 return; 1924 1925 /* 1926 * Double wide has implications for planes 1927 * so best keep it disabled when not needed. 1928 */ 1929 if (old_crtc_state->double_wide) 1930 val &= ~PIPECONF_DOUBLE_WIDE; 1931 1932 /* Don't disable pipe or pipe PLLs if needed */ 1933 if (!IS_I830(dev_priv)) 1934 val &= ~PIPECONF_ENABLE; 1935 1936 intel_de_write(dev_priv, reg, val); 1937 if ((val & PIPECONF_ENABLE) == 0) 1938 intel_wait_for_pipe_off(old_crtc_state); 1939 } 1940 1941 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 1942 { 1943 return IS_GEN(dev_priv, 2) ? 2048 : 4096; 1944 } 1945 1946 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) 1947 { 1948 if (!is_ccs_modifier(fb->modifier)) 1949 return false; 1950 1951 return plane >= fb->format->num_planes / 2; 1952 } 1953 1954 static bool is_gen12_ccs_modifier(u64 modifier) 1955 { 1956 return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || 1957 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; 1958 1959 } 1960 1961 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) 1962 { 1963 return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane); 1964 } 1965 1966 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane) 1967 { 1968 if (is_ccs_modifier(fb->modifier)) 1969 return is_ccs_plane(fb, plane); 1970 1971 return plane == 1; 1972 } 1973 1974 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) 1975 { 1976 drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || 1977 (main_plane && main_plane >= fb->format->num_planes / 2)); 1978 1979 return fb->format->num_planes / 2 + main_plane; 1980 } 1981 1982 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) 1983 { 1984 drm_WARN_ON(fb->dev, 
/*
 * intel_format_info_is_yuv_semiplanar - is @info a semiplanar YUV layout
 * for the given @modifier?
 *
 * Semiplanar YUV normally has two planes; with a CCS modifier the format
 * carries four planes instead — presumably one aux/CCS plane per color
 * plane, mirroring the num_planes/2 split used by is_ccs_plane(); confirm
 * against the framebuffer creation code.
 */
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
				    uint64_t modifier)
{
	return info->is_yuv &&
	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}
2053 case 8: 2054 case 16: 2055 return 256; 2056 default: 2057 MISSING_CASE(cpp); 2058 return cpp; 2059 } 2060 break; 2061 default: 2062 MISSING_CASE(fb->modifier); 2063 return cpp; 2064 } 2065 } 2066 2067 static unsigned int 2068 intel_tile_height(const struct drm_framebuffer *fb, int color_plane) 2069 { 2070 if (is_gen12_ccs_plane(fb, color_plane)) 2071 return 1; 2072 2073 return intel_tile_size(to_i915(fb->dev)) / 2074 intel_tile_width_bytes(fb, color_plane); 2075 } 2076 2077 /* Return the tile dimensions in pixel units */ 2078 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, 2079 unsigned int *tile_width, 2080 unsigned int *tile_height) 2081 { 2082 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane); 2083 unsigned int cpp = fb->format->cpp[color_plane]; 2084 2085 *tile_width = tile_width_bytes / cpp; 2086 *tile_height = intel_tile_height(fb, color_plane); 2087 } 2088 2089 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, 2090 int color_plane) 2091 { 2092 unsigned int tile_width, tile_height; 2093 2094 intel_tile_dims(fb, color_plane, &tile_width, &tile_height); 2095 2096 return fb->pitches[color_plane] * tile_height; 2097 } 2098 2099 unsigned int 2100 intel_fb_align_height(const struct drm_framebuffer *fb, 2101 int color_plane, unsigned int height) 2102 { 2103 unsigned int tile_height = intel_tile_height(fb, color_plane); 2104 2105 return ALIGN(height, tile_height); 2106 } 2107 2108 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 2109 { 2110 unsigned int size = 0; 2111 int i; 2112 2113 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 2114 size += rot_info->plane[i].width * rot_info->plane[i].height; 2115 2116 return size; 2117 } 2118 2119 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info) 2120 { 2121 unsigned int size = 0; 2122 int i; 2123 2124 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) 2125 size += 
			rem_info->plane[i].width * rem_info->plane[i].height;

	return size;
}

/*
 * Fill out the GGTT view used to map @fb for scanout: the normal view
 * for 0/180 degree rotation, the fb's precomputed rotated view for
 * 90/270.
 */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
			const struct drm_framebuffer *fb,
			unsigned int rotation)
{
	view->type = I915_GGTT_VIEW_NORMAL;
	if (drm_rotation_90_or_270(rotation)) {
		view->type = I915_GGTT_VIEW_ROTATED;
		view->rotated = to_intel_framebuffer(fb)->rot_info;
	}
}

/* Required GGTT alignment for cursor surfaces, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;
	else if (IS_I85X(dev_priv))
		return 256;
	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;
	else
		return 4 * 1024;
}

/* Required GGTT alignment for linear scanout surfaces, per gen. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;
	else
		return 0;
}

/*
 * Required GGTT alignment of @color_plane's surface base address,
 * keyed off the fb modifier. Note that gen12 semiplanar UV planes get
 * a tile-row alignment, which need not be a power of two.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}

/*
 * Does this plane need a fence register when pinned? Pre-gen4 always
 * does; on gen4+ only FBC-capable planes mapped through the normal
 * (unrotated) GGTT view do.
 */
static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return INTEL_GEN(dev_priv) < 4 ||
	       (plane->has_fbc &&
		plane_state->view.type == I915_GGTT_VIEW_NORMAL);
}

/*
 * Pin @fb's backing object into the GGTT for scanout (using @view) and
 * optionally install a fence register for it.
 *
 * Returns the pinned vma (with an extra reference taken, released by
 * intel_unpin_fb_vma()) or an ERR_PTR. PLANE_HAS_FENCE is OR'd into
 * *out_flags when a fence was actually installed.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE(review): plane 0's alignment is used for the whole pin, and
	 * non-power-of-2 alignments (tile-row aligned gen12 UV planes) are
	 * rejected here.
	 */
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression. For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* A fence is mandatory pre-gen4; fail the pin. */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}

/*
 * Undo intel_pin_and_fence_fb_obj(): drop the fence (if @flags says one
 * was installed), unpin from the display plane and release the vma
 * reference.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}

/* Pitch of @color_plane in the GTT view implied by @rotation. */
static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
			  unsigned int rotation)
{
	if (drm_rotation_90_or_270(rotation))
		return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
	else
		return fb->pitches[color_plane];
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->color_plane[color_plane].stride;

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)

{
	*x += state->color_plane[color_plane].x;
	*y += state->color_plane[color_plane].y;
}

/*
 * Convert the byte distance between two tile-aligned offsets
 * (@old_offset -> @new_offset) into x/y coordinate adjustments, so the
 * same pixel is addressed from the new base. Both offsets must be
 * tile-size aligned and new_offset <= old_offset (WARN_ONs below).
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	/* Distribute the tile delta over full rows and a column remainder. */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}

/*
 * Is @color_plane laid out linearly? True for the linear modifier and
 * for gen12 CCS planes, which are treated as linear surfaces here.
 */
static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
{
	return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
	       is_gen12_ccs_plane(fb, color_plane);
}

/*
 * Adjust x/y so that the pixel they address relative to @old_offset is
 * addressed relative to @new_offset instead. @pitch is in bytes for
 * linear surfaces and in the GTT-view orientation for tiled ones.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch counts tile rows of the view. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: fold the offset delta straight into y rows + x bytes. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}

/*
 * Adjust the tile offset by moving the difference into
 * the x/y offsets.
 */
static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
					     const struct intel_plane_state *state,
					     int color_plane,
					     u32 old_offset, u32 new_offset)
{
	return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
					   state->hw.rotation,
					   state->color_plane[color_plane].stride,
					   old_offset, new_offset);
}

/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into a whole-tile part and an intra-tile remainder. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		/* Push the alignment slack back into the x/y offsets. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			offset_aligned = rounddown(offset_aligned, alignment);
			/* Remainder below the alignment becomes the new x/y. */
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			*y = *x = 0;
		}
	}

	return offset_aligned;
}

/*
 * As intel_compute_aligned_offset(), with the pitch and alignment
 * derived from the plane state (cursors have their own alignment).
 */
static u32 intel_plane_compute_aligned_offset(int *x, int *y,
					      const struct intel_plane_state *state,
					      int color_plane)
{
	struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int rotation = state->hw.rotation;
	int pitch =
		    state->color_plane[color_plane].stride;
	u32 alignment;

	if (intel_plane->id == PLANE_CURSOR)
		alignment = intel_cursor_alignment(dev_priv);
	else
		alignment = intel_surf_alignment(fb, color_plane);

	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
					    pitch, rotation, alignment);
}

/* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	/*
	 * The offset must be tile-size aligned for tiled fbs; gen12
	 * semiplanar UV planes additionally require tile-row alignment.
	 */
	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Move fb->offsets[] entirely into the x/y coordinates. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}

/* Map an fb modifier to the corresponding GEM tiling mode. */
static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case I915_FORMAT_MOD_X_TILED:
		return
I915_TILING_X; 2590 case I915_FORMAT_MOD_Y_TILED: 2591 case I915_FORMAT_MOD_Y_TILED_CCS: 2592 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 2593 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 2594 return I915_TILING_Y; 2595 default: 2596 return I915_TILING_NONE; 2597 } 2598 } 2599 2600 /* 2601 * From the Sky Lake PRM: 2602 * "The Color Control Surface (CCS) contains the compression status of 2603 * the cache-line pairs. The compression state of the cache-line pair 2604 * is specified by 2 bits in the CCS. Each CCS cache-line represents 2605 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled 2606 * cache-line-pairs. CCS is always Y tiled." 2607 * 2608 * Since cache line pairs refers to horizontally adjacent cache lines, 2609 * each cache line in the CCS corresponds to an area of 32x16 cache 2610 * lines on the main surface. Since each pixel is 4 bytes, this gives 2611 * us a ratio of one byte in the CCS for each 8x16 pixels in the 2612 * main surface. 2613 */ 2614 static const struct drm_format_info skl_ccs_formats[] = { 2615 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 2616 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2617 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 2618 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2619 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 2620 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2621 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 2622 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 2623 }; 2624 2625 /* 2626 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the 2627 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles 2628 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of 2629 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in 2630 * the main surface. 
2631 */ 2632 static const struct drm_format_info gen12_ccs_formats[] = { 2633 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 2634 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2635 .hsub = 1, .vsub = 1, }, 2636 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 2637 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2638 .hsub = 1, .vsub = 1, }, 2639 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 2640 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2641 .hsub = 1, .vsub = 1, .has_alpha = true }, 2642 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 2643 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2644 .hsub = 1, .vsub = 1, .has_alpha = true }, 2645 { .format = DRM_FORMAT_YUYV, .num_planes = 2, 2646 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2647 .hsub = 2, .vsub = 1, .is_yuv = true }, 2648 { .format = DRM_FORMAT_YVYU, .num_planes = 2, 2649 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2650 .hsub = 2, .vsub = 1, .is_yuv = true }, 2651 { .format = DRM_FORMAT_UYVY, .num_planes = 2, 2652 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2653 .hsub = 2, .vsub = 1, .is_yuv = true }, 2654 { .format = DRM_FORMAT_VYUY, .num_planes = 2, 2655 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 2656 .hsub = 2, .vsub = 1, .is_yuv = true }, 2657 { .format = DRM_FORMAT_NV12, .num_planes = 4, 2658 .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, 2659 .hsub = 2, .vsub = 2, .is_yuv = true }, 2660 { .format = DRM_FORMAT_P010, .num_planes = 4, 2661 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 2662 .hsub = 2, .vsub = 2, .is_yuv = true }, 2663 { .format = DRM_FORMAT_P012, .num_planes = 4, 2664 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 
	  1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};

/* Linear search of @formats for the entry matching fourcc @format. */
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

/*
 * Return the driver-private format info (with the extra CCS planes)
 * for fbs created with a CCS modifier; NULL means use the core's
 * default format info.
 */
static const struct drm_format_info *
intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	switch (cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return lookup_format_info(skl_ccs_formats,
					  ARRAY_SIZE(skl_ccs_formats),
					  cmd->pixel_format);
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return lookup_format_info(gen12_ccs_formats,
					  ARRAY_SIZE(gen12_ccs_formats),
					  cmd->pixel_format);
	default:
		return NULL;
	}
}

/* True for any render/media compression (CCS) modifier. */
bool is_ccs_modifier(u64 modifier)
{
	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
	       modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
}

/*
 * Gen12 AUX plane stride: 64 bytes of CCS data for every 512 bytes of
 * main surface stride, rounded up.
 */
static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
{
	return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
			    512) * 64;
}

/*
 * Maximum fb stride supported for the given format/modifier, taken
 * from the primary plane of the first available pipe.
 */
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all,
	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
2728 */ 2729 crtc = intel_get_first_crtc(dev_priv); 2730 if (!crtc) 2731 return 0; 2732 2733 plane = to_intel_plane(crtc->base.primary); 2734 2735 return plane->max_stride(plane, pixel_format, modifier, 2736 DRM_MODE_ROTATE_0); 2737 } 2738 2739 static 2740 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, 2741 u32 pixel_format, u64 modifier) 2742 { 2743 /* 2744 * Arbitrary limit for gen4+ chosen to match the 2745 * render engine max stride. 2746 * 2747 * The new CCS hash mode makes remapping impossible 2748 */ 2749 if (!is_ccs_modifier(modifier)) { 2750 if (INTEL_GEN(dev_priv) >= 7) 2751 return 256*1024; 2752 else if (INTEL_GEN(dev_priv) >= 4) 2753 return 128*1024; 2754 } 2755 2756 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); 2757 } 2758 2759 static u32 2760 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) 2761 { 2762 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2763 u32 tile_width; 2764 2765 if (is_surface_linear(fb, color_plane)) { 2766 u32 max_stride = intel_plane_fb_max_stride(dev_priv, 2767 fb->format->format, 2768 fb->modifier); 2769 2770 /* 2771 * To make remapping with linear generally feasible 2772 * we need the stride to be page aligned. 2773 */ 2774 if (fb->pitches[color_plane] > max_stride && 2775 !is_ccs_modifier(fb->modifier)) 2776 return intel_tile_size(dev_priv); 2777 else 2778 return 64; 2779 } 2780 2781 tile_width = intel_tile_width_bytes(fb, color_plane); 2782 if (is_ccs_modifier(fb->modifier)) { 2783 /* 2784 * Display WA #0531: skl,bxt,kbl,glk 2785 * 2786 * Render decompression and plane width > 3840 2787 * combined with horizontal panning requires the 2788 * plane stride to be a multiple of 4. We'll just 2789 * require the entire fb to accommodate that to avoid 2790 * potential runtime errors at plane configuration time. 
2791 */ 2792 if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840) 2793 tile_width *= 4; 2794 /* 2795 * The main surface pitch must be padded to a multiple of four 2796 * tile widths. 2797 */ 2798 else if (INTEL_GEN(dev_priv) >= 12) 2799 tile_width *= 4; 2800 } 2801 return tile_width; 2802 } 2803 2804 bool intel_plane_can_remap(const struct intel_plane_state *plane_state) 2805 { 2806 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2807 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 2808 const struct drm_framebuffer *fb = plane_state->hw.fb; 2809 int i; 2810 2811 /* We don't want to deal with remapping with cursors */ 2812 if (plane->id == PLANE_CURSOR) 2813 return false; 2814 2815 /* 2816 * The display engine limits already match/exceed the 2817 * render engine limits, so not much point in remapping. 2818 * Would also need to deal with the fence POT alignment 2819 * and gen2 2KiB GTT tile size. 2820 */ 2821 if (INTEL_GEN(dev_priv) < 4) 2822 return false; 2823 2824 /* 2825 * The new CCS hash mode isn't compatible with remapping as 2826 * the virtual address of the pages affects the compressed data. 2827 */ 2828 if (is_ccs_modifier(fb->modifier)) 2829 return false; 2830 2831 /* Linear needs a page aligned stride for remapping */ 2832 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { 2833 unsigned int alignment = intel_tile_size(dev_priv) - 1; 2834 2835 for (i = 0; i < fb->format->num_planes; i++) { 2836 if (fb->pitches[i] & alignment) 2837 return false; 2838 } 2839 } 2840 2841 return true; 2842 } 2843 2844 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) 2845 { 2846 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 2847 const struct drm_framebuffer *fb = plane_state->hw.fb; 2848 unsigned int rotation = plane_state->hw.rotation; 2849 u32 stride, max_stride; 2850 2851 /* 2852 * No remapping for invisible planes since we don't have 2853 * an actual source viewport to remap. 
2854 */ 2855 if (!plane_state->uapi.visible) 2856 return false; 2857 2858 if (!intel_plane_can_remap(plane_state)) 2859 return false; 2860 2861 /* 2862 * FIXME: aux plane limits on gen9+ are 2863 * unclear in Bspec, for now no checking. 2864 */ 2865 stride = intel_fb_pitch(fb, 0, rotation); 2866 max_stride = plane->max_stride(plane, fb->format->format, 2867 fb->modifier, rotation); 2868 2869 return stride > max_stride; 2870 } 2871 2872 static void 2873 intel_fb_plane_get_subsampling(int *hsub, int *vsub, 2874 const struct drm_framebuffer *fb, 2875 int color_plane) 2876 { 2877 int main_plane; 2878 2879 if (color_plane == 0) { 2880 *hsub = 1; 2881 *vsub = 1; 2882 2883 return; 2884 } 2885 2886 /* 2887 * TODO: Deduct the subsampling from the char block for all CCS 2888 * formats and planes. 2889 */ 2890 if (!is_gen12_ccs_plane(fb, color_plane)) { 2891 *hsub = fb->format->hsub; 2892 *vsub = fb->format->vsub; 2893 2894 return; 2895 } 2896 2897 main_plane = ccs_to_main_plane(fb, color_plane); 2898 *hsub = drm_format_info_block_width(fb->format, color_plane) / 2899 drm_format_info_block_width(fb->format, main_plane); 2900 2901 /* 2902 * The min stride check in the core framebuffer_check() function 2903 * assumes that format->hsub applies to every plane except for the 2904 * first plane. That's incorrect for the CCS AUX plane of the first 2905 * plane, but for the above check to pass we must define the block 2906 * width with that subsampling applied to it. Adjust the width here 2907 * accordingly, so we can calculate the actual subsampling factor. 
2908 */ 2909 if (main_plane == 0) 2910 *hsub *= fb->format->hsub; 2911 2912 *vsub = 32; 2913 } 2914 static int 2915 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y) 2916 { 2917 struct drm_i915_private *i915 = to_i915(fb->dev); 2918 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2919 int main_plane; 2920 int hsub, vsub; 2921 int tile_width, tile_height; 2922 int ccs_x, ccs_y; 2923 int main_x, main_y; 2924 2925 if (!is_ccs_plane(fb, ccs_plane)) 2926 return 0; 2927 2928 intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height); 2929 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); 2930 2931 tile_width *= hsub; 2932 tile_height *= vsub; 2933 2934 ccs_x = (x * hsub) % tile_width; 2935 ccs_y = (y * vsub) % tile_height; 2936 2937 main_plane = ccs_to_main_plane(fb, ccs_plane); 2938 main_x = intel_fb->normal[main_plane].x % tile_width; 2939 main_y = intel_fb->normal[main_plane].y % tile_height; 2940 2941 /* 2942 * CCS doesn't have its own x/y offset register, so the intra CCS tile 2943 * x/y offsets must match between CCS and the main surface. 2944 */ 2945 if (main_x != ccs_x || main_y != ccs_y) { 2946 drm_dbg_kms(&i915->drm, 2947 "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", 2948 main_x, main_y, 2949 ccs_x, ccs_y, 2950 intel_fb->normal[main_plane].x, 2951 intel_fb->normal[main_plane].y, 2952 x, y); 2953 return -EINVAL; 2954 } 2955 2956 return 0; 2957 } 2958 2959 static void 2960 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane) 2961 { 2962 int main_plane = is_ccs_plane(fb, color_plane) ? 
2963 ccs_to_main_plane(fb, color_plane) : 0; 2964 int main_hsub, main_vsub; 2965 int hsub, vsub; 2966 2967 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane); 2968 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane); 2969 *w = fb->width / main_hsub / hsub; 2970 *h = fb->height / main_vsub / vsub; 2971 } 2972 2973 /* 2974 * Setup the rotated view for an FB plane and return the size the GTT mapping 2975 * requires for this view. 2976 */ 2977 static u32 2978 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info, 2979 u32 gtt_offset_rotated, int x, int y, 2980 unsigned int width, unsigned int height, 2981 unsigned int tile_size, 2982 unsigned int tile_width, unsigned int tile_height, 2983 struct drm_framebuffer *fb) 2984 { 2985 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2986 struct intel_rotation_info *rot_info = &intel_fb->rot_info; 2987 unsigned int pitch_tiles; 2988 struct drm_rect r; 2989 2990 /* Y or Yf modifiers required for 90/270 rotation */ 2991 if (fb->modifier != I915_FORMAT_MOD_Y_TILED && 2992 fb->modifier != I915_FORMAT_MOD_Yf_TILED) 2993 return 0; 2994 2995 if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane))) 2996 return 0; 2997 2998 rot_info->plane[plane] = *plane_info; 2999 3000 intel_fb->rotated[plane].pitch = plane_info->height * tile_height; 3001 3002 /* rotate the x/y offsets to match the GTT view */ 3003 drm_rect_init(&r, x, y, width, height); 3004 drm_rect_rotate(&r, 3005 plane_info->width * tile_width, 3006 plane_info->height * tile_height, 3007 DRM_MODE_ROTATE_270); 3008 x = r.x1; 3009 y = r.y1; 3010 3011 /* rotate the tile dimensions to match the GTT view */ 3012 pitch_tiles = intel_fb->rotated[plane].pitch / tile_height; 3013 swap(tile_width, tile_height); 3014 3015 /* 3016 * We only keep the x/y offsets, so push all of the 3017 * gtt offset into the x/y offsets. 
	 */
	intel_adjust_tile_offset(&x, &y,
				 tile_width, tile_height,
				 tile_size, pitch_tiles,
				 gtt_offset_rotated * tile_size, 0);

	/*
	 * First pixel of the framebuffer from
	 * the start of the rotated gtt mapping.
	 */
	intel_fb->rotated[plane].x = x;
	intel_fb->rotated[plane].y = y;

	/* Number of tiles this plane occupies in the rotated mapping. */
	return plane_info->width * plane_info->height;
}

/*
 * Precompute the normal and rotated view x/y offsets for every color
 * plane of @fb, and validate that the fb layout fits inside the backing
 * object.
 *
 * Returns 0 on success, or a negative error code if a plane offset can't
 * be converted to x/y, a CCS plane is inconsistent, a tiled fb would wrap
 * past its stride, or the fb exceeds the bo size.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		intel_fb_plane_dims(&width, &height, fb, i);

		/* Convert the plane's byte offset into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return ret;
		}

		ret = intel_fb_check_ccs_xy(fb, i, x, y);
		if (ret)
			return ret;

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb, i)) {
			struct intel_remapped_plane_info plane_info;
			unsigned int tile_width, tile_height;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			plane_info.offset = offset;
			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
							 tile_width * cpp);
			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
			plane_info.height = DIV_ROUND_UP(y + height,
							 tile_height);

			/* how many tiles does this plane need */
			size = plane_info.stride * plane_info.height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* Accumulate this plane's footprint in the rotated view. */
			gtt_offset_rotated +=
				setup_fb_rotation(i, &plane_info,
						  gtt_offset_rotated,
						  x, y, width, height,
						  tile_size,
						  tile_width, tile_height,
						  fb);
		} else {
			/* Linear surface: tile count from the raw byte extent. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		drm_dbg_kms(&dev_priv->drm,
			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
			    mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}

/*
 * Rebuild plane_state->view as a rotated (90/270) or remapped GGTT view
 * for planes whose fb can't be scanned out through the normal view, and
 * recompute the per color plane x/y/stride accordingly.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* CCS fbs are never remapped; they take the normal view path. */
	drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* Plane 0 is never subsampled; later planes use the format's hsub/vsub. */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* In the rotated view tiles advance down a column. */
			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		/* Next plane starts after this plane's tiles in the view. */
		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}

/*
 * Compute the GGTT view and the per color plane x/y/stride/offset for
 * @plane_state, going through the remapped/rotated path when the plane
 * needs it. Returns 0, or a negative error code if the resulting stride
 * is still not acceptable.
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->hw.fb);
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes;

	/* No fb means nothing to map. */
	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( Can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	/* Normal path: reuse the offsets precomputed by intel_fill_fb_info(). */
	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}

/* Translate a pre-skl DSPCNTR pixel format field into a fourcc code. */
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return
		       DRM_FORMAT_C8;
	case DISPPLANE_BGRA555:
		return DRM_FORMAT_ARGB1555;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		/* Unknown register values fall back to XRGB8888. */
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRA888:
		return DRM_FORMAT_ARGB8888;
	case DISPPLANE_RGBA888:
		return DRM_FORMAT_ABGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	case DISPPLANE_BGRA101010:
		return DRM_FORMAT_ARGB2101010;
	case DISPPLANE_RGBA101010:
		return DRM_FORMAT_ABGR2101010;
	case DISPPLANE_RGBX161616:
		return DRM_FORMAT_XBGR16161616F;
	}
}

/*
 * Translate a skl+ PLANE_CTL format field into a fourcc code.
 * @rgb_order and @alpha select between the RGB/BGR and X/A variants of
 * the RGB formats; they are ignored for the YUV formats.
 */
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	case PLANE_CTL_FORMAT_NV12:
		return DRM_FORMAT_NV12;
	case PLANE_CTL_FORMAT_XYUV:
		return DRM_FORMAT_XYUV8888;
	case PLANE_CTL_FORMAT_P010:
		return DRM_FORMAT_P010;
	case PLANE_CTL_FORMAT_P012:
		return DRM_FORMAT_P012;
	case PLANE_CTL_FORMAT_P016:
		return DRM_FORMAT_P016;
	case PLANE_CTL_FORMAT_Y210:
		return DRM_FORMAT_Y210;
	case PLANE_CTL_FORMAT_Y212:
		return DRM_FORMAT_Y212;
	case PLANE_CTL_FORMAT_Y216:
		return DRM_FORMAT_Y216;
	case PLANE_CTL_FORMAT_Y410:
		return DRM_FORMAT_XVYU2101010;
	case PLANE_CTL_FORMAT_Y412:
		return DRM_FORMAT_XVYU12_16161616;
	case PLANE_CTL_FORMAT_Y416:
		return DRM_FORMAT_XVYU16161616;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		/* Unknown register values fall back to the 8888 family. */
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR2101010;
			else
				return DRM_FORMAT_XBGR2101010;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB2101010;
			else
				return DRM_FORMAT_XRGB2101010;
		}
	case PLANE_CTL_FORMAT_XRGB_16161616F:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR16161616F;
			else
				return DRM_FORMAT_XBGR16161616F;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB16161616F;
			else
				return DRM_FORMAT_XRGB16161616F;
		}
	}
}

/*
 * Create and pin a VMA covering the stolen-memory range the BIOS
 * programmed for the initial scanout buffer. Returns NULL on any
 * failure, in which case the BIOS fb is simply not taken over.
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Round base/size out to the minimum GTT alignment. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	/* NOTE(review): size * 2 is u32 arithmetic — assumes stolen sizes stay well below 2 GiB. */
	if (size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/* Carry over the BIOS tiling mode so scanout parameters match. */
	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Must be pinned at the exact GGTT address the BIOS used. */
	if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* Tiled BIOS fbs are only usable if they can be fenced. */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}

/*
 * Wrap the BIOS-programmed scanout memory in an intel_framebuffer so the
 * initial plane config can be taken over. Returns true on success with
 * plane_config->vma holding the pinned mapping, false otherwise.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	struct i915_vma *vma;

	/* Only the modifiers the BIOS can actually program are accepted. */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	vma = initial_plane_vma(dev_priv, plane_config);
	if (!vma)
		return false;

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb),
				   vma->obj, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto err_vma;
	}

	plane_config->vma = vma;
	return true;

err_vma:
	i915_vma_put(vma);
	return false;
}

/*
 * Set the plane's uapi visibility and keep the crtc's plane_mask in
 * sync with it.
 */
static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			struct intel_plane_state *plane_state,
			bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

/* Rebuild crtc_state->active_planes from the (unique-id) plane_mask. */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask)
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}

/*
 * Disable @plane on @crtc outside of a full atomic commit, fixing up
 * the software tracking (visibility, active_planes, data rate, min
 * cdclk) to match the hardware state.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS is tied to the primary plane on hsw/bdw. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}

/* Frontbuffer tracking object of @fb, or NULL when there is no fb. */
static struct intel_frontbuffer *
to_intel_frontbuffer(struct drm_framebuffer *fb)
{
	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}

/*
 * Take over the BIOS fb for the primary plane of @intel_crtc: wrap it in
 * our own fb, or failing that share the fb of another active CRTC
 * scanning out from the same address, or as a last resort disable the
 * plane so we never run with a visible plane that has no fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT address => same BIOS fb; share it. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* Extra pin so the fb survives until the first real modeset. */
	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	/* Full-screen src/dst rectangles (src in 16.16 fixed point). */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}

/* Max Y/RGB plane width on skl/kbl/cfl, by tiling modifier and cpp. */
static int skl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		/*
		 * Validated limit is 4k, but has 5k should
		 * work apart from the following features:
		 * - Ytile (already limited to 4k)
		 * - FP16 (already limited
		 *   to 4k)
		 * - render compression (already limited to 4k)
		 * - KVMR sprite and cursor (don't care)
		 * - horizontal panning (TODO verify this)
		 * - pipe and plane scaling (TODO verify this)
		 */
		if (cpp == 8)
			return 4096;
		else
			return 5120;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* FIXME AUX plane? */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (cpp == 8)
			return 2048;
		else
			return 4096;
	default:
		MISSING_CASE(fb->modifier);
		return 2048;
	}
}

/* Max plane width on glk/cnl, by tiling modifier and cpp. */
static int glk_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		if (cpp == 8)
			return 4096;
		else
			return 5120;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* FIXME AUX plane? */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (cpp == 8)
			return 2048;
		else
			return 5120;
	default:
		MISSING_CASE(fb->modifier);
		return 2048;
	}
}

/* Max plane width on icl+: 5k regardless of format or tiling. */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}

/* Max plane height on skl through cnl. */
static int skl_max_plane_height(void)
{
	return 4096;
}

/* Max plane height on icl+. */
static int icl_max_plane_height(void)
{
	return 4320;
}

/*
 * Walk the CCS AUX surface offset backwards, one alignment step at a
 * time, until its x/y coordinates match the main surface's. On success,
 * updates the AUX plane offset/x/y in @plane_state and returns true;
 * returns false if no matching offset exists.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	/* AUX may not sit before the main surface, nor below its y. */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
							       alignment);
		/* Fold the sub-sample remainder back into the coordinates. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}

/*
 * Scanline distance between the aligned surface offset of color plane 0
 * and the start of the fb, used to program the fence y offset.
 */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->color_plane[0].offset, 0);

	return y;
}

/*
 * Compute and validate the main (Y/RGB) surface offset/x/y of a skl+
 * plane: enforce the per-platform max plane size, the X-tile stride
 * constraint and the CCS AUX alignment requirement. Returns 0 or
 * -EINVAL.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int max_width;
	int max_height;
	u32 alignment;
	u32 offset;
	int aux_plane = intel_main_to_aux_plane(fb, 0);
	/*
	 * NOTE(review): aux_plane/aux_offset are read even for non-CCS fbs —
	 * presumably intel_main_to_aux_plane() returns 0 then and the value
	 * is only consumed in the is_ccs_modifier() paths; confirm.
	 */
	u32 aux_offset = plane_state->color_plane[aux_plane].offset;

	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || h > max_height) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
		return -EINVAL;

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Step the offset back until x + w fits within the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				drm_dbg_kms(&dev_priv->drm,
					    "Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, aux_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[aux_plane].x ||
		    y != plane_state->color_plane[aux_plane].y) {
			drm_dbg_kms(&dev_priv->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}

/*
 * Compute and validate the UV (chroma) surface offset/x/y of a planar
 * (e.g. NV12/P01x) plane on skl+. Returns 0 or -EINVAL.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int uv_plane = 1;
	int max_width = skl_max_plane_width(fb, uv_plane, rotation);
	int max_height = 4096;
	/* >> 17 rather than >> 16: chroma is subsampled by 2 in x and y. */
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
	offset = intel_plane_compute_aligned_offset(&x, &y,
						    plane_state, uv_plane);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		drm_dbg_kms(&i915->drm,
			    "CbCr source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	if (is_ccs_modifier(fb->modifier)) {
		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
		int aux_offset = plane_state->color_plane[ccs_plane].offset;
		int alignment = intel_surf_alignment(fb, uv_plane);

		/* Same AUX-must-follow-main constraint as the main surface. */
		if (offset > aux_offset)
			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset,
								   aux_offset & ~(alignment - 1));

		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, ccs_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[ccs_plane].x ||
		    y != plane_state->color_plane[ccs_plane].y) {
			drm_dbg_kms(&i915->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;

	return 0;
}

/*
 * Compute the offset/x/y of every CCS AUX plane from the main plane src
 * coordinates. Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	u32 offset;
	int ccs_plane;

	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
		int main_hsub, main_vsub;
		int hsub, vsub;
		int x, y;

		if (!is_ccs_plane(fb, ccs_plane))
			continue;

		/* Total subsampling = CCS subsampling on top of the main plane's. */
		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
					       ccs_to_main_plane(fb, ccs_plane));
		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

		hsub *= main_hsub;
		vsub *= main_vsub;
		x = src_x / hsub;
		y = src_y / vsub;

		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);

		offset = intel_plane_compute_aligned_offset(&x, &y,
							    plane_state,
							    ccs_plane);

		plane_state->color_plane[ccs_plane].offset = offset;
		/* Coordinates relative to the main plane's subsampling. */
		plane_state->color_plane[ccs_plane].x = (x * hsub +
							 src_x % hsub) /
							main_hsub;
		plane_state->color_plane[ccs_plane].y = (y * vsub +
							 src_y % vsub) /
							main_vsub;
	}

	return 0;
}

/*
 * Top-level skl+ plane surface check: compute the GTT view, then the
 * AUX surfaces (CCS and/or UV), then the main surface. Returns 0 or a
 * negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	bool needs_aux = false;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	/*
	 * Handle the AUX surface first since the main surface setup depends on
	 * it.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		needs_aux = true;
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	if (intel_format_info_is_yuv_semiplanar(fb->format,
						fb->modifier)) {
		needs_aux = true;
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	/* No AUX surface used: poison the remaining color plane offsets. */
	if (!needs_aux) {
		int i;

		for (i = 1; i < fb->format->num_planes; i++) {
			plane_state->color_plane[i].offset = ~0xfff;
			plane_state->color_plane[i].x = 0;
			plane_state->color_plane[i].y = 0;
		}
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}

/*
 * Pixel rate multiplier (as *num/*den) the pre-skl primary plane
 * imposes on cdclk, depending on the format's cpp.
 */
static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
			     const struct intel_plane_state *plane_state,
			     unsigned int *num, unsigned int *den)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int cpp = fb->format->cpp[0];

	/*
	 * g4x bspec says 64bpp pixel rate can't exceed 80%
	 * of cdclk when the sprite plane is enabled on the
	 * same pipe. ilk/snb bspec says 64bpp pixel rate is
	 * never allowed to exceed 80% of cdclk. Let's just go
	 * with the ilk/snb limit always.
	 */
	if (cpp == 8) {
		/* 10/8 == 1/0.8: the 80% cdclk limit expressed as a ratio. */
		*num = 10;
		*den = 8;
	} else {
		*num = 1;
		*den = 1;
	}
}

/*
 * Minimum cdclk this plane requires, derived from the crtc pixel rate
 * and the plane's cdclk ratio.
 */
static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
				const struct intel_plane_state *plane_state)
{
	unsigned int pixel_rate;
	unsigned int num, den;

	/*
	 * Note that crtc_state->pixel_rate accounts for both
	 * horizontal and vertical panel fitter downscaling factors.
	 * Pre-HSW bspec tells us to only consider the horizontal
	 * downscaling factor here. We ignore that and just consider
	 * both for simplicity.
	 */
	pixel_rate = crtc_state->pixel_rate;

	i9xx_plane_ratio(crtc_state, plane_state, &num, &den);

	/* two pixels per clock with double wide pipe */
	if (crtc_state->double_wide)
		den *= 2;

	return DIV_ROUND_UP(pixel_rate * num, den);
}

/* Max primary plane stride (bytes) for pre-skl platforms, by gen/tiling. */
unsigned int
i9xx_plane_max_stride(struct intel_plane *plane,
		      u32 pixel_format, u64 modifier,
		      unsigned int rotation)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	if (!HAS_GMCH(dev_priv)) {
		return 32*1024;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		if (modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* gen2: plane C is more limited than A/B. */
		if (plane->i9xx_plane == PLANE_C)
			return 4*1024;
		else
			return 8*1024;
	}
}

/* DSPCNTR bits determined by crtc state (gamma, csc, pipe select). */
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dspcntr = 0;

	if (crtc_state->gamma_enable)
		dspcntr |= DISPPLANE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Pipe select bits only exist before ilk. */
	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	return dspcntr;
}

/*
 * DSPCNTR bits determined by plane state (format, tiling, rotation).
 * Returns 0 for an unsupported pixel format (after MISSING_CASE).
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}

/*
 * Compute the final color plane x/y/offset for a pre-skl primary plane.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x, src_y, src_w;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	src_w = drm_rect_width(&plane_state->uapi.src)
>> 16; 4281 src_x = plane_state->uapi.src.x1 >> 16; 4282 src_y = plane_state->uapi.src.y1 >> 16; 4283 4284 /* Undocumented hardware limit on i965/g4x/vlv/chv */ 4285 if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) 4286 return -EINVAL; 4287 4288 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 4289 4290 if (INTEL_GEN(dev_priv) >= 4) 4291 offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 4292 plane_state, 0); 4293 else 4294 offset = 0; 4295 4296 /* 4297 * Put the final coordinates back so that the src 4298 * coordinate checks will see the right values. 4299 */ 4300 drm_rect_translate_to(&plane_state->uapi.src, 4301 src_x << 16, src_y << 16); 4302 4303 /* HSW/BDW do this automagically in hardware */ 4304 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 4305 unsigned int rotation = plane_state->hw.rotation; 4306 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 4307 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 4308 4309 if (rotation & DRM_MODE_ROTATE_180) { 4310 src_x += src_w - 1; 4311 src_y += src_h - 1; 4312 } else if (rotation & DRM_MODE_REFLECT_X) { 4313 src_x += src_w - 1; 4314 } 4315 } 4316 4317 plane_state->color_plane[0].offset = offset; 4318 plane_state->color_plane[0].x = src_x; 4319 plane_state->color_plane[0].y = src_y; 4320 4321 return 0; 4322 } 4323 4324 static bool i9xx_plane_has_windowing(struct intel_plane *plane) 4325 { 4326 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4327 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4328 4329 if (IS_CHERRYVIEW(dev_priv)) 4330 return i9xx_plane == PLANE_B; 4331 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 4332 return false; 4333 else if (IS_GEN(dev_priv, 4)) 4334 return i9xx_plane == PLANE_C; 4335 else 4336 return i9xx_plane == PLANE_B || 4337 i9xx_plane == PLANE_C; 4338 } 4339 4340 static int 4341 i9xx_plane_check(struct intel_crtc_state *crtc_state, 4342 struct intel_plane_state *plane_state) 4343 { 4344 struct 
intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}

/*
 * Program the pre-skl primary plane registers from the precomputed
 * plane state. All register writes happen under the uncore lock.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* gen2/3 have no DSPSURF; the surface address is the linear offset */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
			  plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
		intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
				  (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
				  linear_offset);
		intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
				  (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* Disable a pre-skl primary plane, preserving the CRTC-derived DSPCNTR bits. */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
*/
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * Read back whether the plane is enabled in hardware, and which pipe
 * it is currently assigned to. Returns false if the power well is off.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

/* Disable (unbind) a single pipe scaler by clearing its registers. */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * This function detaches (aka. unbinds) unused scalers in hardware
 */
static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	int i;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}

/* Divisor converting a byte stride into the units PLANE_STRIDE expects. */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	if (is_surface_linear(fb, color_plane))
		return 64;
	else if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);
	else
		return intel_tile_width_bytes(fb, color_plane);
}

/*
 * Stride for the given color plane in PLANE_STRIDE units (64-byte chunks
 * for linear, tiles otherwise); 0 for planes beyond the format's count.
 */
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
		     int color_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 stride = plane_state->color_plane[color_plane].stride;

	if (color_plane >= fb->format->num_planes)
		return 0;

	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}

/* Translate a DRM fourcc into the PLANE_CTL format field encoding. */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case
DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_XYUV8888:
		return PLANE_CTL_FORMAT_XYUV;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}

/* Map the DRM pixel blend mode onto the skl PLANE_CTL alpha bits. */
static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->hw.fb->format->has_alpha)
		return PLANE_CTL_ALPHA_DISABLE;

	switch (plane_state->hw.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_CTL_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->hw.pixel_blend_mode);
		return PLANE_CTL_ALPHA_DISABLE;
	}
}

/* Map the DRM pixel blend mode onto the glk+ PLANE_COLOR_CTL alpha bits. */
static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->hw.fb->format->has_alpha)
		return PLANE_COLOR_ALPHA_DISABLE;

	switch (plane_state->hw.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_COLOR_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->hw.pixel_blend_mode);
		return PLANE_COLOR_ALPHA_DISABLE;
	}
}

/* Translate a framebuffer modifier into the PLANE_CTL tiling bits. */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

/* Translate a DRM rotation into the PLANE_CTL rotation bits. */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}

/*
 * Translate a DRM reflection into the PLANE_CTL flip bit.
 * Only horizontal flip is supported; REFLECT_Y hits MISSING_CASE.
 */
static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}

/*
 * CRTC-derived PLANE_CTL bits (pipe gamma/CSC); empty on glk and gen10+
 * where these moved out of PLANE_CTL.
 */
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 plane_ctl = 0;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		return plane_ctl;

	if (crtc_state->gamma_enable)
		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;

	return plane_ctl;
}

/* Build the plane-state-derived PLANE_CTL value for skl+ planes. */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* pre-glk: alpha/gamma/CSC configuration lives in PLANE_CTL */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}

/*
 * CRTC-derived PLANE_COLOR_CTL bits (pipe gamma/CSC); empty on gen11+
 * where these no longer live in PLANE_COLOR_CTL.
 */
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 plane_color_ctl = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		return plane_color_ctl;

	if (crtc_state->gamma_enable)
		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;

	return plane_color_ctl;
}

/* Build the plane-state-derived PLANE_COLOR_CTL value for glk+ planes. */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* HDR-capable planes use the input CSC unit instead */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}

/*
 * Re-read HW state and (optionally) recommit a previously duplicated
 * atomic state after a reset/resume. Forces a full recalculation on
 * every CRTC so fast-modeset shortcuts are not taken.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}

/* Whether a GPU reset also clobbers the display hardware state. */
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
		intel_has_gpu_reset(&dev_priv->gt));
}

/*
 * Quiesce the display before a GPU reset: take the modeset locks,
 * duplicate the current atomic state for later restore, and disable
 * all CRTCs. Paired with intel_finish_reset().
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}

/*
 * Restore the display after a GPU reset: reinitialize the hardware if
 * the reset clobbered it, recommit the saved state, and drop the locks
 * taken by intel_prepare_reset().
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret =
__intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}

/* Apply icl pipe workarounds via the PIPE_CHICKEN register. */
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

/* Switch the FDI link from a training pattern to the normal idle pattern. */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak.
*/
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock; ack by writing the status bit back */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* poll for symbol lock */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");

}

/* FDI voltage-swing/pre-emphasis settings tried in order during training. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint.
*/
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* try each vswing/preemphasis setting until bit lock is seen */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* same vswing/preemphasis sweep, now waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

drm_dbg_kms(&dev_priv->drm, "FDI train done.\n"); 5301 } 5302 5303 /* Manual link training for Ivy Bridge A0 parts */ 5304 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, 5305 const struct intel_crtc_state *crtc_state) 5306 { 5307 struct drm_device *dev = crtc->base.dev; 5308 struct drm_i915_private *dev_priv = to_i915(dev); 5309 enum pipe pipe = crtc->pipe; 5310 i915_reg_t reg; 5311 u32 temp, i, j; 5312 5313 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 5314 for train result */ 5315 reg = FDI_RX_IMR(pipe); 5316 temp = intel_de_read(dev_priv, reg); 5317 temp &= ~FDI_RX_SYMBOL_LOCK; 5318 temp &= ~FDI_RX_BIT_LOCK; 5319 intel_de_write(dev_priv, reg, temp); 5320 5321 intel_de_posting_read(dev_priv, reg); 5322 udelay(150); 5323 5324 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n", 5325 intel_de_read(dev_priv, FDI_RX_IIR(pipe))); 5326 5327 /* Try each vswing and preemphasis setting twice before moving on */ 5328 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { 5329 /* disable first in case we need to retry */ 5330 reg = FDI_TX_CTL(pipe); 5331 temp = intel_de_read(dev_priv, reg); 5332 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 5333 temp &= ~FDI_TX_ENABLE; 5334 intel_de_write(dev_priv, reg, temp); 5335 5336 reg = FDI_RX_CTL(pipe); 5337 temp = intel_de_read(dev_priv, reg); 5338 temp &= ~FDI_LINK_TRAIN_AUTO; 5339 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 5340 temp &= ~FDI_RX_ENABLE; 5341 intel_de_write(dev_priv, reg, temp); 5342 5343 /* enable CPU FDI TX and PCH FDI RX */ 5344 reg = FDI_TX_CTL(pipe); 5345 temp = intel_de_read(dev_priv, reg); 5346 temp &= ~FDI_DP_PORT_WIDTH_MASK; 5347 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 5348 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 5349 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 5350 temp |= snb_b_fdi_train_param[j/2]; 5351 temp |= FDI_COMPOSITE_SYNC; 5352 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE); 5353 5354 intel_de_write(dev_priv, 
			       FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			/* IIR is read a second time in the condition itself,
			 * in case the lock bit set just after the first read */
			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Enable the PCH FDI RX PLL and the CPU FDI TX PLL for @crtc_state's pipe. */
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* mirror the pipe's BPC setting from PIPECONF into bits 18:16 */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

/* Disable the FDI PLLs in the reverse order of ilk_fdi_pll_enable(). */
static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp =
	intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

/* Tear down the CPU FDI TX / PCH FDI RX link for @crtc's pipe. */
static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

/*
 * Returns true (after waiting one vblank) if some CRTC still has an atomic
 * commit whose cleanup (which unpins the old framebuffers) has not completed
 * yet; false once every commit has been cleaned up.
 */
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		drm_crtc_wait_one_vblank(crtc);

		return true;
	}

	return false;
}

/* Gate the iCLKIP pixel clock and stop the SSC modulator via sideband. */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

/*
 * Read back the currently programmed iCLKIP frequency in KHz (the
 * inverse of lpt_program_iclkip()), or 0 if iCLKIP is gated/disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}

/* Copy the CPU transcoder's timing registers over to the PCH transcoder. */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}

/* Set or clear the FDI B/C lane-sharing (bifurcation) chicken bit. */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	/* the bit may only be flipped while both FDI B and C RX are disabled */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}

/* Choose the FDI B/C bifurcation setting from the pipe and its lane count. */
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	default:
		BUG();
	}
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
static struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	int num_encoders = 0;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	/*
	 * NOTE(review): if no connector in @state targets this CRTC,
	 * encoder stays NULL and the drm_WARN() below dereferences it
	 * (encoder->base.dev) before it can warn — worth confirming all
	 * callers guarantee exactly one connector.
	 */
	drm_WARN(encoder->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(crtc->pipe));

	return encoder;
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ilk_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivb_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence.
	 */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ilk_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = intel_de_read(dev_priv, reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		intel_de_write(dev_priv, reg, temp);
	}

	ilk_enable_pch_transcoder(crtc_state);
}

/* LPT counterpart of ilk_pch_enable(): program iCLKIP, copy the timings
 * to the (single) PCH transcoder and enable it. */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}

/* Sanity check after a mode set: verify PIPEDSL (presumably the pipe's
 * scanline counter — confirm against the register docs) keeps changing,
 * i.e. the pipe is actually running, retrying once before complaining. */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}

/*
 * The hardware phase 0.0 refers to the center of the pixel.
 * We want to start from the top/left edge which is phase
 * -0.5. That matches how the hardware calculates the scaling
 * factors (from top-left of the first pixel to bottom-right
 * of the last pixel, as opposed to the pixel centers).
 *
 * For 4:2:0 subsampled chroma planes we obviously have to
 * adjust that so that the chroma sample position lands in
 * the right spot.
 *
 * Note that for packed YCbCr 4:2:2 formats there is no way to
 * control chroma siting. The hardware simply replicates the
 * chroma samples for both of the luma samples, and thus we don't
 * actually get the expected MPEG2 chroma siting convention :(
 * The same behaviour is observed on pre-SKL platforms as well.
 *
 * Theory behind the formula (note that we ignore sub-pixel
 * source coordinates):
 *      s = source sample position
 *      d = destination sample position
 *
 * Downscaling 4:1:
 *      -0.5
 *       | 0.0
 *       | |     1.5 (initial phase)
 *       | |     |
 *       v v     v
 *       | s | s | s | s |
 *       |       d       |
 *
 * Upscaling 1:4:
 *      -0.5
 *       | -0.375 (initial phase)
 *       | |     0.0
 *       | |     |
 *       v v     v
 *       |       s       |
 *       | d | d | d | d |
 */
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
{
	int phase = -0x8000;
	u16 trip = 0;

	if (chroma_cosited)
		phase += (sub - 1) * 0x8000 / sub;

	phase += scale / (2 * sub);

	/*
	 * Hardware initial phase limited to [-0.5:1.5].
	 * Since the max hardware scale factor is 3.0, we
	 * should never actually exceed 1.0 here.
	 */
	WARN_ON(phase < -0x8000 || phase > 0x18000);

	if (phase < 0)
		phase = 0x10000 + phase;
	else
		trip = PS_PHASE_TRIP;

	return ((phase >> 2) & PS_PHASE_MASK) | trip;
}

/* Scaler source/destination size limits (pixels) per platform generation */
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16

/*
 * Stage the allocation (or freeing) of one scaler for one scaler user
 * (the CRTC itself or a plane) in @crtc_state, after validating the
 * source/destination sizes against the platform's scaler limits.  The
 * actual register update happens later in plane/panel-fit programming.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler bound to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			drm_dbg_kms(&dev_priv->drm,
				    "scaler_user index %u.%u: "
				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
				    intel_crtc->pipe, scaler_user, *scaler_id,
				    scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		drm_dbg_kms(&dev_priv->drm,
			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
			    "size is out of scaler range\n",
			    intel_crtc->pipe, scaler_user, src_w, src_h,
			    dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		    scaler_state->scaler_users);

	return 0;
}

/* Stage a pipe scaler update for panel fitting (or detach the scaler
 * when the CRTC is not active). */
static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int width, height;

	if (crtc_state->pch_pfit.enabled) {
		width =
			drm_rect_width(&crtc_state->pch_pfit.dst);
		height = drm_rect_height(&crtc_state->pch_pfit.dst);
	} else {
		width = adjusted_mode->crtc_hdisplay;
		height = adjusted_mode->crtc_vdisplay;
	}

	return skl_update_scaler(crtc_state, !crtc_state->hw.active,
				 SKL_CRTC_INDEX,
				 &crtc_state->scaler_state.scaler_id,
				 crtc_state->pipe_src_w, crtc_state->pipe_src_h,
				 width, height, NULL, 0,
				 crtc_state->pch_pfit.enabled);
}

/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	bool force_detach = !fb || !plane_state->uapi.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		need_scaler = true;

	/* src rect is in 16.16 fixed point, hence the >> 16 */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->uapi.src) >> 16,
				drm_rect_height(&plane_state->uapi.src) >> 16,
				drm_rect_width(&plane_state->uapi.dst),
				drm_rect_height(&plane_state->uapi.dst),
				fb ? fb->format : NULL,
				fb ? fb->modifier : 0,
				need_scaler);

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] scaling with color key not allowed",
			    intel_plane->base.base.id,
			    intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_XYUV8888:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_XVYU2101010:
	case DRM_FORMAT_XVYU12_16161616:
	case DRM_FORMAT_XVYU16161616:
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		/* fp16 formats are only scalable on gen11+ */
		if (INTEL_GEN(dev_priv) >= 11)
			break;
		/* fall through */
	default:
		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			    intel_plane->base.base.id, intel_plane->base.name,
			    fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}

/* Detach every scaler on @old_crtc_state's CRTC. */
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(crtc, i);
}

/* Program the previously staged pipe scaler for panel fitting (SKL+). */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct
	intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_rect src = {
		.x2 = crtc_state->pipe_src_w << 16,
		.y2 = crtc_state->pipe_src_h << 16,
	};
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	u16 uv_rgb_hphase, uv_rgb_vphase;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;
	int hscale, vscale;
	unsigned long irqflags;
	int id;

	if (!crtc_state->pch_pfit.enabled)
		return;

	if (drm_WARN_ON(&dev_priv->drm,
			crtc_state->scaler_state.scaler_id < 0))
		return;

	hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
	vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);

	uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
	uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

	id = scaler_state->scaler_id;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			  PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
	intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
	intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
			  x << 16 | y);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
			  width << 16 | height);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* Program the ILK-style panel fitter for @crtc_state's pipe. */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}

/* Enable IPS on HSW/BDW (BDW goes through the pcode mailbox, HSW via MMIO). */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}

/* Disable IPS on HSW/BDW; ends with a vblank wait before planes may go away. */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay)
		(void) intel_overlay_switch_off(intel_crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
6380 */ 6381 } 6382 6383 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state, 6384 const struct intel_crtc_state *new_crtc_state) 6385 { 6386 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6387 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6388 6389 if (!old_crtc_state->ips_enabled) 6390 return false; 6391 6392 if (needs_modeset(new_crtc_state)) 6393 return true; 6394 6395 /* 6396 * Workaround : Do not read or write the pipe palette/gamma data while 6397 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 6398 * 6399 * Disable IPS before we program the LUT. 6400 */ 6401 if (IS_HASWELL(dev_priv) && 6402 (new_crtc_state->uapi.color_mgmt_changed || 6403 new_crtc_state->update_pipe) && 6404 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 6405 return true; 6406 6407 return !new_crtc_state->ips_enabled; 6408 } 6409 6410 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state, 6411 const struct intel_crtc_state *new_crtc_state) 6412 { 6413 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6414 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6415 6416 if (!new_crtc_state->ips_enabled) 6417 return false; 6418 6419 if (needs_modeset(new_crtc_state)) 6420 return true; 6421 6422 /* 6423 * Workaround : Do not read or write the pipe palette/gamma data while 6424 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 6425 * 6426 * Re-enable IPS after the LUT has been programmed. 6427 */ 6428 if (IS_HASWELL(dev_priv) && 6429 (new_crtc_state->uapi.color_mgmt_changed || 6430 new_crtc_state->update_pipe) && 6431 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) 6432 return true; 6433 6434 /* 6435 * We can't read out IPS on broadwell, assume the worst and 6436 * forcibly enable IPS on the first fastset. 
6437 */ 6438 if (new_crtc_state->update_pipe && 6439 old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED) 6440 return true; 6441 6442 return !old_crtc_state->ips_enabled; 6443 } 6444 6445 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state) 6446 { 6447 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 6448 6449 if (!crtc_state->nv12_planes) 6450 return false; 6451 6452 /* WA Display #0827: Gen9:all */ 6453 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) 6454 return true; 6455 6456 return false; 6457 } 6458 6459 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) 6460 { 6461 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 6462 6463 /* Wa_2006604312:icl,ehl */ 6464 if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11)) 6465 return true; 6466 6467 return false; 6468 } 6469 6470 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, 6471 const struct intel_crtc_state *new_crtc_state) 6472 { 6473 return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) && 6474 new_crtc_state->active_planes; 6475 } 6476 6477 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, 6478 const struct intel_crtc_state *new_crtc_state) 6479 { 6480 return old_crtc_state->active_planes && 6481 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state)); 6482 } 6483 6484 static void intel_post_plane_update(struct intel_atomic_state *state, 6485 struct intel_crtc *crtc) 6486 { 6487 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6488 const struct intel_crtc_state *old_crtc_state = 6489 intel_atomic_get_old_crtc_state(state, crtc); 6490 const struct intel_crtc_state *new_crtc_state = 6491 intel_atomic_get_new_crtc_state(state, crtc); 6492 enum pipe pipe = crtc->pipe; 6493 6494 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits); 6495 6496 if (new_crtc_state->update_wm_post && 
new_crtc_state->hw.active) 6497 intel_update_watermarks(crtc); 6498 6499 if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state)) 6500 hsw_enable_ips(new_crtc_state); 6501 6502 intel_fbc_post_update(state, crtc); 6503 6504 if (needs_nv12_wa(old_crtc_state) && 6505 !needs_nv12_wa(new_crtc_state)) 6506 skl_wa_827(dev_priv, pipe, false); 6507 6508 if (needs_scalerclk_wa(old_crtc_state) && 6509 !needs_scalerclk_wa(new_crtc_state)) 6510 icl_wa_scalerclkgating(dev_priv, pipe, false); 6511 } 6512 6513 static void intel_pre_plane_update(struct intel_atomic_state *state, 6514 struct intel_crtc *crtc) 6515 { 6516 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6517 const struct intel_crtc_state *old_crtc_state = 6518 intel_atomic_get_old_crtc_state(state, crtc); 6519 const struct intel_crtc_state *new_crtc_state = 6520 intel_atomic_get_new_crtc_state(state, crtc); 6521 enum pipe pipe = crtc->pipe; 6522 6523 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state)) 6524 hsw_disable_ips(old_crtc_state); 6525 6526 if (intel_fbc_pre_update(state, crtc)) 6527 intel_wait_for_vblank(dev_priv, pipe); 6528 6529 /* Display WA 827 */ 6530 if (!needs_nv12_wa(old_crtc_state) && 6531 needs_nv12_wa(new_crtc_state)) 6532 skl_wa_827(dev_priv, pipe, true); 6533 6534 /* Wa_2006604312:icl,ehl */ 6535 if (!needs_scalerclk_wa(old_crtc_state) && 6536 needs_scalerclk_wa(new_crtc_state)) 6537 icl_wa_scalerclkgating(dev_priv, pipe, true); 6538 6539 /* 6540 * Vblank time updates from the shadow to live plane control register 6541 * are blocked if the memory self-refresh mode is active at that 6542 * moment. So to make sure the plane gets truly disabled, disable 6543 * first the self-refresh mode. The self-refresh enable bit in turn 6544 * will be checked/applied by the HW only at the next frame start 6545 * event which is after the vblank start event, so we need to have a 6546 * wait-for-vblank between disabling the plane and the pipe. 
6547 */ 6548 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active && 6549 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 6550 intel_wait_for_vblank(dev_priv, pipe); 6551 6552 /* 6553 * IVB workaround: must disable low power watermarks for at least 6554 * one frame before enabling scaling. LP watermarks can be re-enabled 6555 * when scaling is disabled. 6556 * 6557 * WaCxSRDisabledForSpriteScaling:ivb 6558 */ 6559 if (old_crtc_state->hw.active && 6560 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) 6561 intel_wait_for_vblank(dev_priv, pipe); 6562 6563 /* 6564 * If we're doing a modeset we don't need to do any 6565 * pre-vblank watermark programming here. 6566 */ 6567 if (!needs_modeset(new_crtc_state)) { 6568 /* 6569 * For platforms that support atomic watermarks, program the 6570 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 6571 * will be the intermediate values that are safe for both pre- and 6572 * post- vblank; when vblank happens, the 'active' values will be set 6573 * to the final 'target' values and we'll do this again to get the 6574 * optimal watermarks. For gen9+ platforms, the values we program here 6575 * will be the final target values which will get automatically latched 6576 * at vblank time; no further programming will be necessary. 6577 * 6578 * If a platform hasn't been transitioned to atomic watermarks yet, 6579 * we'll continue to update watermarks the old way, if flags tell 6580 * us to. 6581 */ 6582 if (dev_priv->display.initial_watermarks) 6583 dev_priv->display.initial_watermarks(state, crtc); 6584 else if (new_crtc_state->update_wm_pre) 6585 intel_update_watermarks(crtc); 6586 } 6587 6588 /* 6589 * Gen2 reports pipe underruns whenever all planes are disabled. 6590 * So disable underrun reporting before all the planes get disabled. 6591 * 6592 * We do this after .initial_watermarks() so that we have a 6593 * chance of catching underruns with the intermediate watermarks 6594 * vs. 
the old plane configuration. 6595 */ 6596 if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state)) 6597 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6598 } 6599 6600 static void intel_crtc_disable_planes(struct intel_atomic_state *state, 6601 struct intel_crtc *crtc) 6602 { 6603 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6604 const struct intel_crtc_state *new_crtc_state = 6605 intel_atomic_get_new_crtc_state(state, crtc); 6606 unsigned int update_mask = new_crtc_state->update_planes; 6607 const struct intel_plane_state *old_plane_state; 6608 struct intel_plane *plane; 6609 unsigned fb_bits = 0; 6610 int i; 6611 6612 intel_crtc_dpms_overlay_disable(crtc); 6613 6614 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 6615 if (crtc->pipe != plane->pipe || 6616 !(update_mask & BIT(plane->id))) 6617 continue; 6618 6619 intel_disable_plane(plane, new_crtc_state); 6620 6621 if (old_plane_state->uapi.visible) 6622 fb_bits |= plane->frontbuffer_bit; 6623 } 6624 6625 intel_frontbuffer_flip(dev_priv, fb_bits); 6626 } 6627 6628 /* 6629 * intel_connector_primary_encoder - get the primary encoder for a connector 6630 * @connector: connector for which to return the encoder 6631 * 6632 * Returns the primary encoder for a connector. There is a 1:1 mapping from 6633 * all connectors to their encoder, except for DP-MST connectors which have 6634 * both a virtual and a primary encoder. These DP-MST primary encoders can be 6635 * pointed to by as many DP-MST connectors as there are pipes. 
6636 */ 6637 static struct intel_encoder * 6638 intel_connector_primary_encoder(struct intel_connector *connector) 6639 { 6640 struct intel_encoder *encoder; 6641 6642 if (connector->mst_port) 6643 return &dp_to_dig_port(connector->mst_port)->base; 6644 6645 encoder = intel_attached_encoder(connector); 6646 drm_WARN_ON(connector->base.dev, !encoder); 6647 6648 return encoder; 6649 } 6650 6651 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 6652 { 6653 struct drm_connector_state *new_conn_state; 6654 struct drm_connector *connector; 6655 int i; 6656 6657 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 6658 i) { 6659 struct intel_connector *intel_connector; 6660 struct intel_encoder *encoder; 6661 struct intel_crtc *crtc; 6662 6663 if (!intel_connector_needs_modeset(state, connector)) 6664 continue; 6665 6666 intel_connector = to_intel_connector(connector); 6667 encoder = intel_connector_primary_encoder(intel_connector); 6668 if (!encoder->update_prepare) 6669 continue; 6670 6671 crtc = new_conn_state->crtc ? 6672 to_intel_crtc(new_conn_state->crtc) : NULL; 6673 encoder->update_prepare(state, encoder, crtc); 6674 } 6675 } 6676 6677 static void intel_encoders_update_complete(struct intel_atomic_state *state) 6678 { 6679 struct drm_connector_state *new_conn_state; 6680 struct drm_connector *connector; 6681 int i; 6682 6683 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 6684 i) { 6685 struct intel_connector *intel_connector; 6686 struct intel_encoder *encoder; 6687 struct intel_crtc *crtc; 6688 6689 if (!intel_connector_needs_modeset(state, connector)) 6690 continue; 6691 6692 intel_connector = to_intel_connector(connector); 6693 encoder = intel_connector_primary_encoder(intel_connector); 6694 if (!encoder->update_complete) 6695 continue; 6696 6697 crtc = new_conn_state->crtc ? 
6698 to_intel_crtc(new_conn_state->crtc) : NULL; 6699 encoder->update_complete(state, encoder, crtc); 6700 } 6701 } 6702 6703 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, 6704 struct intel_crtc *crtc) 6705 { 6706 const struct intel_crtc_state *crtc_state = 6707 intel_atomic_get_new_crtc_state(state, crtc); 6708 const struct drm_connector_state *conn_state; 6709 struct drm_connector *conn; 6710 int i; 6711 6712 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6713 struct intel_encoder *encoder = 6714 to_intel_encoder(conn_state->best_encoder); 6715 6716 if (conn_state->crtc != &crtc->base) 6717 continue; 6718 6719 if (encoder->pre_pll_enable) 6720 encoder->pre_pll_enable(state, encoder, 6721 crtc_state, conn_state); 6722 } 6723 } 6724 6725 static void intel_encoders_pre_enable(struct intel_atomic_state *state, 6726 struct intel_crtc *crtc) 6727 { 6728 const struct intel_crtc_state *crtc_state = 6729 intel_atomic_get_new_crtc_state(state, crtc); 6730 const struct drm_connector_state *conn_state; 6731 struct drm_connector *conn; 6732 int i; 6733 6734 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6735 struct intel_encoder *encoder = 6736 to_intel_encoder(conn_state->best_encoder); 6737 6738 if (conn_state->crtc != &crtc->base) 6739 continue; 6740 6741 if (encoder->pre_enable) 6742 encoder->pre_enable(state, encoder, 6743 crtc_state, conn_state); 6744 } 6745 } 6746 6747 static void intel_encoders_enable(struct intel_atomic_state *state, 6748 struct intel_crtc *crtc) 6749 { 6750 const struct intel_crtc_state *crtc_state = 6751 intel_atomic_get_new_crtc_state(state, crtc); 6752 const struct drm_connector_state *conn_state; 6753 struct drm_connector *conn; 6754 int i; 6755 6756 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6757 struct intel_encoder *encoder = 6758 to_intel_encoder(conn_state->best_encoder); 6759 6760 if (conn_state->crtc != &crtc->base) 6761 continue; 6762 6763 
if (encoder->enable) 6764 encoder->enable(state, encoder, 6765 crtc_state, conn_state); 6766 intel_opregion_notify_encoder(encoder, true); 6767 } 6768 } 6769 6770 static void intel_encoders_disable(struct intel_atomic_state *state, 6771 struct intel_crtc *crtc) 6772 { 6773 const struct intel_crtc_state *old_crtc_state = 6774 intel_atomic_get_old_crtc_state(state, crtc); 6775 const struct drm_connector_state *old_conn_state; 6776 struct drm_connector *conn; 6777 int i; 6778 6779 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6780 struct intel_encoder *encoder = 6781 to_intel_encoder(old_conn_state->best_encoder); 6782 6783 if (old_conn_state->crtc != &crtc->base) 6784 continue; 6785 6786 intel_opregion_notify_encoder(encoder, false); 6787 if (encoder->disable) 6788 encoder->disable(state, encoder, 6789 old_crtc_state, old_conn_state); 6790 } 6791 } 6792 6793 static void intel_encoders_post_disable(struct intel_atomic_state *state, 6794 struct intel_crtc *crtc) 6795 { 6796 const struct intel_crtc_state *old_crtc_state = 6797 intel_atomic_get_old_crtc_state(state, crtc); 6798 const struct drm_connector_state *old_conn_state; 6799 struct drm_connector *conn; 6800 int i; 6801 6802 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6803 struct intel_encoder *encoder = 6804 to_intel_encoder(old_conn_state->best_encoder); 6805 6806 if (old_conn_state->crtc != &crtc->base) 6807 continue; 6808 6809 if (encoder->post_disable) 6810 encoder->post_disable(state, encoder, 6811 old_crtc_state, old_conn_state); 6812 } 6813 } 6814 6815 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state, 6816 struct intel_crtc *crtc) 6817 { 6818 const struct intel_crtc_state *old_crtc_state = 6819 intel_atomic_get_old_crtc_state(state, crtc); 6820 const struct drm_connector_state *old_conn_state; 6821 struct drm_connector *conn; 6822 int i; 6823 6824 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 6825 
struct intel_encoder *encoder = 6826 to_intel_encoder(old_conn_state->best_encoder); 6827 6828 if (old_conn_state->crtc != &crtc->base) 6829 continue; 6830 6831 if (encoder->post_pll_disable) 6832 encoder->post_pll_disable(state, encoder, 6833 old_crtc_state, old_conn_state); 6834 } 6835 } 6836 6837 static void intel_encoders_update_pipe(struct intel_atomic_state *state, 6838 struct intel_crtc *crtc) 6839 { 6840 const struct intel_crtc_state *crtc_state = 6841 intel_atomic_get_new_crtc_state(state, crtc); 6842 const struct drm_connector_state *conn_state; 6843 struct drm_connector *conn; 6844 int i; 6845 6846 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 6847 struct intel_encoder *encoder = 6848 to_intel_encoder(conn_state->best_encoder); 6849 6850 if (conn_state->crtc != &crtc->base) 6851 continue; 6852 6853 if (encoder->update_pipe) 6854 encoder->update_pipe(state, encoder, 6855 crtc_state, conn_state); 6856 } 6857 } 6858 6859 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) 6860 { 6861 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6862 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 6863 6864 plane->disable_plane(plane, crtc_state); 6865 } 6866 6867 static void ilk_crtc_enable(struct intel_atomic_state *state, 6868 struct intel_crtc *crtc) 6869 { 6870 const struct intel_crtc_state *new_crtc_state = 6871 intel_atomic_get_new_crtc_state(state, crtc); 6872 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6873 enum pipe pipe = crtc->pipe; 6874 6875 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 6876 return; 6877 6878 /* 6879 * Sometimes spurious CPU pipe underruns happen during FDI 6880 * training, at least with VGA+HDMI cloning. Suppress them. 6881 * 6882 * On ILK we get an occasional spurious CPU pipe underruns 6883 * between eDP port A enable and vdd enable. Also PCH port 6884 * enable seems to result in the occasional CPU pipe underrun. 
6885 * 6886 * Spurious PCH underruns also occur during PCH enabling. 6887 */ 6888 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6889 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 6890 6891 if (new_crtc_state->has_pch_encoder) 6892 intel_prepare_shared_dpll(new_crtc_state); 6893 6894 if (intel_crtc_has_dp_encoder(new_crtc_state)) 6895 intel_dp_set_m_n(new_crtc_state, M1_N1); 6896 6897 intel_set_pipe_timings(new_crtc_state); 6898 intel_set_pipe_src_size(new_crtc_state); 6899 6900 if (new_crtc_state->has_pch_encoder) 6901 intel_cpu_transcoder_set_m_n(new_crtc_state, 6902 &new_crtc_state->fdi_m_n, NULL); 6903 6904 ilk_set_pipeconf(new_crtc_state); 6905 6906 crtc->active = true; 6907 6908 intel_encoders_pre_enable(state, crtc); 6909 6910 if (new_crtc_state->has_pch_encoder) { 6911 /* Note: FDI PLL enabling _must_ be done before we enable the 6912 * cpu pipes, hence this is separate from all the other fdi/pch 6913 * enabling. */ 6914 ilk_fdi_pll_enable(new_crtc_state); 6915 } else { 6916 assert_fdi_tx_disabled(dev_priv, pipe); 6917 assert_fdi_rx_disabled(dev_priv, pipe); 6918 } 6919 6920 ilk_pfit_enable(new_crtc_state); 6921 6922 /* 6923 * On ILK+ LUT must be loaded before the pipe is running but with 6924 * clocks enabled 6925 */ 6926 intel_color_load_luts(new_crtc_state); 6927 intel_color_commit(new_crtc_state); 6928 /* update DSPCNTR to configure gamma for pipe bottom color */ 6929 intel_disable_primary_plane(new_crtc_state); 6930 6931 if (dev_priv->display.initial_watermarks) 6932 dev_priv->display.initial_watermarks(state, crtc); 6933 intel_enable_pipe(new_crtc_state); 6934 6935 if (new_crtc_state->has_pch_encoder) 6936 ilk_pch_enable(state, new_crtc_state); 6937 6938 intel_crtc_vblank_on(new_crtc_state); 6939 6940 intel_encoders_enable(state, crtc); 6941 6942 if (HAS_PCH_CPT(dev_priv)) 6943 cpt_verify_modeset(dev_priv, pipe); 6944 6945 /* 6946 * Must wait for vblank to avoid spurious PCH FIFO underruns. 
6947 * And a second vblank wait is needed at least on ILK with 6948 * some interlaced HDMI modes. Let's do the double wait always 6949 * in case there are more corner cases we don't know about. 6950 */ 6951 if (new_crtc_state->has_pch_encoder) { 6952 intel_wait_for_vblank(dev_priv, pipe); 6953 intel_wait_for_vblank(dev_priv, pipe); 6954 } 6955 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6956 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 6957 } 6958 6959 /* IPS only exists on ULT machines and is tied to pipe A. */ 6960 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 6961 { 6962 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; 6963 } 6964 6965 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, 6966 enum pipe pipe, bool apply) 6967 { 6968 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)); 6969 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 6970 6971 if (apply) 6972 val |= mask; 6973 else 6974 val &= ~mask; 6975 6976 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val); 6977 } 6978 6979 static void icl_pipe_mbus_enable(struct intel_crtc *crtc) 6980 { 6981 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6982 enum pipe pipe = crtc->pipe; 6983 u32 val; 6984 6985 val = MBUS_DBOX_A_CREDIT(2); 6986 6987 if (INTEL_GEN(dev_priv) >= 12) { 6988 val |= MBUS_DBOX_BW_CREDIT(2); 6989 val |= MBUS_DBOX_B_CREDIT(12); 6990 } else { 6991 val |= MBUS_DBOX_BW_CREDIT(1); 6992 val |= MBUS_DBOX_B_CREDIT(8); 6993 } 6994 6995 intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val); 6996 } 6997 6998 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) 6999 { 7000 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7001 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7002 7003 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe), 7004 HSW_LINETIME(crtc_state->linetime) | 7005 HSW_IPS_LINETIME(crtc_state->ips_linetime)); 7006 } 
7007 7008 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) 7009 { 7010 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7011 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7012 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder); 7013 u32 val; 7014 7015 val = intel_de_read(dev_priv, reg); 7016 val &= ~HSW_FRAME_START_DELAY_MASK; 7017 val |= HSW_FRAME_START_DELAY(0); 7018 intel_de_write(dev_priv, reg, val); 7019 } 7020 7021 static void hsw_crtc_enable(struct intel_atomic_state *state, 7022 struct intel_crtc *crtc) 7023 { 7024 const struct intel_crtc_state *new_crtc_state = 7025 intel_atomic_get_new_crtc_state(state, crtc); 7026 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7027 enum pipe pipe = crtc->pipe, hsw_workaround_pipe; 7028 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 7029 bool psl_clkgate_wa; 7030 7031 if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 7032 return; 7033 7034 intel_encoders_pre_pll_enable(state, crtc); 7035 7036 if (new_crtc_state->shared_dpll) 7037 intel_enable_shared_dpll(new_crtc_state); 7038 7039 intel_encoders_pre_enable(state, crtc); 7040 7041 if (!transcoder_is_dsi(cpu_transcoder)) 7042 intel_set_pipe_timings(new_crtc_state); 7043 7044 intel_set_pipe_src_size(new_crtc_state); 7045 7046 if (cpu_transcoder != TRANSCODER_EDP && 7047 !transcoder_is_dsi(cpu_transcoder)) 7048 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), 7049 new_crtc_state->pixel_multiplier - 1); 7050 7051 if (new_crtc_state->has_pch_encoder) 7052 intel_cpu_transcoder_set_m_n(new_crtc_state, 7053 &new_crtc_state->fdi_m_n, NULL); 7054 7055 if (!transcoder_is_dsi(cpu_transcoder)) { 7056 hsw_set_frame_start_delay(new_crtc_state); 7057 hsw_set_pipeconf(new_crtc_state); 7058 } 7059 7060 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 7061 bdw_set_pipemisc(new_crtc_state); 7062 7063 crtc->active = true; 7064 7065 /* Display WA #1180: WaDisableScalarClockGating: glk, 
cnl */ 7066 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 7067 new_crtc_state->pch_pfit.enabled; 7068 if (psl_clkgate_wa) 7069 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 7070 7071 if (INTEL_GEN(dev_priv) >= 9) 7072 skl_pfit_enable(new_crtc_state); 7073 else 7074 ilk_pfit_enable(new_crtc_state); 7075 7076 /* 7077 * On ILK+ LUT must be loaded before the pipe is running but with 7078 * clocks enabled 7079 */ 7080 intel_color_load_luts(new_crtc_state); 7081 intel_color_commit(new_crtc_state); 7082 /* update DSPCNTR to configure gamma/csc for pipe bottom color */ 7083 if (INTEL_GEN(dev_priv) < 9) 7084 intel_disable_primary_plane(new_crtc_state); 7085 7086 hsw_set_linetime_wm(new_crtc_state); 7087 7088 if (INTEL_GEN(dev_priv) >= 11) 7089 icl_set_pipe_chicken(crtc); 7090 7091 if (dev_priv->display.initial_watermarks) 7092 dev_priv->display.initial_watermarks(state, crtc); 7093 7094 if (INTEL_GEN(dev_priv) >= 11) 7095 icl_pipe_mbus_enable(crtc); 7096 7097 intel_encoders_enable(state, crtc); 7098 7099 if (psl_clkgate_wa) { 7100 intel_wait_for_vblank(dev_priv, pipe); 7101 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); 7102 } 7103 7104 /* If we change the relative order between pipe/planes enabling, we need 7105 * to change the workaround. */ 7106 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; 7107 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 7108 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 7109 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 7110 } 7111 } 7112 7113 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) 7114 { 7115 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 7116 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7117 enum pipe pipe = crtc->pipe; 7118 7119 /* To avoid upsetting the power well on haswell only disable the pfit if 7120 * it's in use. The hw state code will make sure we get this right. 
*/ 7121 if (!old_crtc_state->pch_pfit.enabled) 7122 return; 7123 7124 intel_de_write(dev_priv, PF_CTL(pipe), 0); 7125 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0); 7126 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0); 7127 } 7128 7129 static void ilk_crtc_disable(struct intel_atomic_state *state, 7130 struct intel_crtc *crtc) 7131 { 7132 const struct intel_crtc_state *old_crtc_state = 7133 intel_atomic_get_old_crtc_state(state, crtc); 7134 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7135 enum pipe pipe = crtc->pipe; 7136 7137 /* 7138 * Sometimes spurious CPU pipe underruns happen when the 7139 * pipe is already disabled, but FDI RX/TX is still enabled. 7140 * Happens at least with VGA+HDMI cloning. Suppress them. 7141 */ 7142 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 7143 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 7144 7145 intel_encoders_disable(state, crtc); 7146 7147 intel_crtc_vblank_off(old_crtc_state); 7148 7149 intel_disable_pipe(old_crtc_state); 7150 7151 ilk_pfit_disable(old_crtc_state); 7152 7153 if (old_crtc_state->has_pch_encoder) 7154 ilk_fdi_disable(crtc); 7155 7156 intel_encoders_post_disable(state, crtc); 7157 7158 if (old_crtc_state->has_pch_encoder) { 7159 ilk_disable_pch_transcoder(dev_priv, pipe); 7160 7161 if (HAS_PCH_CPT(dev_priv)) { 7162 i915_reg_t reg; 7163 u32 temp; 7164 7165 /* disable TRANS_DP_CTL */ 7166 reg = TRANS_DP_CTL(pipe); 7167 temp = intel_de_read(dev_priv, reg); 7168 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 7169 TRANS_DP_PORT_SEL_MASK); 7170 temp |= TRANS_DP_PORT_SEL_NONE; 7171 intel_de_write(dev_priv, reg, temp); 7172 7173 /* disable DPLL_SEL */ 7174 temp = intel_de_read(dev_priv, PCH_DPLL_SEL); 7175 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 7176 intel_de_write(dev_priv, PCH_DPLL_SEL, temp); 7177 } 7178 7179 ilk_fdi_pll_disable(crtc); 7180 } 7181 7182 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 7183 intel_set_pch_fifo_underrun_reporting(dev_priv, 
pipe, true); 7184 } 7185 7186 static void hsw_crtc_disable(struct intel_atomic_state *state, 7187 struct intel_crtc *crtc) 7188 { 7189 /* 7190 * FIXME collapse everything to one hook. 7191 * Need care with mst->ddi interactions. 7192 */ 7193 intel_encoders_disable(state, crtc); 7194 intel_encoders_post_disable(state, crtc); 7195 } 7196 7197 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 7198 { 7199 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 7200 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7201 7202 if (!crtc_state->gmch_pfit.control) 7203 return; 7204 7205 /* 7206 * The panel fitter should only be adjusted whilst the pipe is disabled, 7207 * according to register description and PRM. 7208 */ 7209 drm_WARN_ON(&dev_priv->drm, 7210 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE); 7211 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); 7212 7213 intel_de_write(dev_priv, PFIT_PGM_RATIOS, 7214 crtc_state->gmch_pfit.pgm_ratios); 7215 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control); 7216 7217 /* Border color in case we don't scale up to the full screen. Black by 7218 * default, change to something else for debugging. 
*/ 7219 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0); 7220 } 7221 7222 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 7223 { 7224 if (phy == PHY_NONE) 7225 return false; 7226 7227 if (IS_ELKHARTLAKE(dev_priv)) 7228 return phy <= PHY_C; 7229 7230 if (INTEL_GEN(dev_priv) >= 11) 7231 return phy <= PHY_B; 7232 7233 return false; 7234 } 7235 7236 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 7237 { 7238 if (INTEL_GEN(dev_priv) >= 12) 7239 return phy >= PHY_D && phy <= PHY_I; 7240 7241 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) 7242 return phy >= PHY_C && phy <= PHY_F; 7243 7244 return false; 7245 } 7246 7247 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 7248 { 7249 if (IS_ELKHARTLAKE(i915) && port == PORT_D) 7250 return PHY_A; 7251 7252 return (enum phy)port; 7253 } 7254 7255 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 7256 { 7257 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 7258 return PORT_TC_NONE; 7259 7260 if (INTEL_GEN(dev_priv) >= 12) 7261 return port - PORT_D; 7262 7263 return port - PORT_C; 7264 } 7265 7266 enum intel_display_power_domain intel_port_to_power_domain(enum port port) 7267 { 7268 switch (port) { 7269 case PORT_A: 7270 return POWER_DOMAIN_PORT_DDI_A_LANES; 7271 case PORT_B: 7272 return POWER_DOMAIN_PORT_DDI_B_LANES; 7273 case PORT_C: 7274 return POWER_DOMAIN_PORT_DDI_C_LANES; 7275 case PORT_D: 7276 return POWER_DOMAIN_PORT_DDI_D_LANES; 7277 case PORT_E: 7278 return POWER_DOMAIN_PORT_DDI_E_LANES; 7279 case PORT_F: 7280 return POWER_DOMAIN_PORT_DDI_F_LANES; 7281 case PORT_G: 7282 return POWER_DOMAIN_PORT_DDI_G_LANES; 7283 default: 7284 MISSING_CASE(port); 7285 return POWER_DOMAIN_PORT_OTHER; 7286 } 7287 } 7288 7289 enum intel_display_power_domain 7290 intel_aux_power_domain(struct intel_digital_port *dig_port) 7291 { 7292 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 7293 
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	/* Type-C ports in Thunderbolt-alt mode use the *_TBT AUX domains. */
	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}

/*
 * Converts aux_ch to power_domain without caring about TBT ports for that use
 * intel_aux_power_domain()
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	default:
		MISSING_CASE(aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}

/* Mask of power domains this crtc needs while active (0 when inactive). */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* each active encoder contributes its own power domain */
	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	return mask;
}

/*
 * Grab references for domains newly needed by the crtc; returns the mask of
 * domains no longer needed, for the caller to drop after the modeset.
 */
static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	old_domains = crtc->enabled_power_domains;
	crtc->enabled_power_domains = new_domains =
		get_crtc_power_domains(crtc_state);

	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	return old_domains & ~new_domains;
}

/* Drop the power domain references in @domains. */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
}

/* VLV/CHV crtc enable sequence; the ordering of the steps below matters. */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B: select legacy blending and a zeroed canvas color. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}

/* Program the FP0/FP1 (PLL divisor) registers from the precomputed state. */
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, FP0(crtc->pipe),
		       crtc_state->dpll_hw_state.fp0);
	intel_de_write(dev_priv, FP1(crtc->pipe),
		       crtc_state->dpll_hw_state.fp1);
}

/* gen2-4 (GMCH) crtc enable sequence; the ordering of the steps matters. */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* no FIFO underrun reporting on gen2 */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}

/* Disable the GMCH panel fitter; must be done while the pipe is off. */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}

/* gen2-4 (GMCH) crtc disable sequence; mirror of i9xx_crtc_enable(). */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI drives the pipe without the DPLL; leave the PLL alone then */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}

/*
 * Force a crtc off outside of the normal atomic commit path, keeping the
 * driver's software tracking (power domains, cdclk, bw state) in sync.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	u64 domains;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* turn off all still-visible planes first */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	/* release every power domain the crtc was holding */
	domains = crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	crtc->enabled_power_domains = 0;

	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}

/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		/* stashed for restore by the resume path */
		dev_priv->modeset_restore_state = state;
	return ret;
}

/* Clean up the drm_encoder base and free the containing intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
				"connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST connectors are checked elsewhere; skip encoder checks */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
				"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
				"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
				"best encoder set without crtc!\n");
	}
}

/* FDI lanes this crtc will consume (0 when it has no PCH encoder). */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

/*
 * Validate the FDI lane count for @pipe against the per-platform limit and
 * against the lanes consumed by the other pipes (IVB lane sharing rules).
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B only work if pipe C uses none */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* pipe C can only get lanes if pipe B uses at most 2 */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}

#define RETRY 1
/* Compute FDI lane count and M/N values, reducing bpp until the link fits. */
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	/* too many lanes needed: drop 2 bits/component and try again */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}

/* Can IPS be used at all with this crtc state? */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	if (!i915_modparams.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}

/* Decide whether IPS will actually be enabled for this crtc state. */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}

/* Can this crtc run in double wide mode? (gen < 4 only) */
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return INTEL_GEN(dev_priv) < 4 &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

/* Effective pipe pixel rate, accounting for pch panel fitter scaling. */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock;
	unsigned int pipe_w, pipe_h, pfit_w, pfit_h;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	pipe_w = crtc_state->pipe_src_w;
	pipe_h = crtc_state->pipe_src_h;

	pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
	pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);

	/* only downscaling raises the rate; clamp to the pfit size */
	if (pipe_w < pfit_w)
		pipe_w = pfit_w;
	if (pipe_h < pfit_h)
		pipe_h = pfit_h;

	if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
			!pfit_w || !pfit_h))
		return pixel_rate;

	return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
		       pfit_w * pfit_h);
}

/* Fill crtc_state->pixel_rate from the adjusted mode (plus pfit on non-GMCH). */
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (HAS_GMCH(dev_priv))
		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
		crtc_state->pixel_rate =
			crtc_state->hw.adjusted_mode.crtc_clock;
	else
		crtc_state->pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);
}

/* Validate generic crtc constraints: clock limits, double wide, CSC, FDI. */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    adjusted_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}

/* Halve num/den together until both fit in the M/N register fields. */
static void
intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

/* Compute one M/N register pair for ratio m/n, reduced to fit the fields. */
static void compute_m_n(unsigned int m, unsigned int n,
			u32 *ret_m, u32 *ret_n,
			bool constant_n)
{
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Give N value as 0x8000 that
	 * should be acceptable by specific devices. 0x8000 is the
	 * specified fixed N value for asynchronous clock mode,
	 * which the devices expect also in synchronous clock mode.
	 */
	if (constant_n)
		*ret_n = 0x8000;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

/* Compute the gmch (data) and link M/N values for a DP/FDI link. */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}

/* Reconcile the VBT's SSC setting with what the BIOS actually programmed. */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary
	 * flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}

/* Should the panel use spread spectrum clocking? (modparam overrides VBT) */
static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915_modparams.panel_use_ssc >= 0)
		return i915_modparams.panel_use_ssc != 0;
	return dev_priv->vbt.lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

/* Pack the FP register value using Pineview's divisor layout. */
static u32 pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

/* Pack the FP register value using the i9xx divisor layout. */
static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}

/* Precompute fp0/fp1; fp1 takes the reduced clock only for LVDS downclock. */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev_priv)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}

static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
				 pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}

/* Program the PCH transcoder M1/N1 link values. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}

/* Does this transcoder have the M2/N2 (DRRS) register set? */
static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
				 enum transcoder transcoder)
{
	if (IS_HASWELL(dev_priv))
		return transcoder == TRANSCODER_EDP;

	/*
	 * Strictly speaking some registers are available before
	 * gen7, but we only support DRRS on gen7+
	 */
	return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
}

/* Program the CPU transcoder M/N values; M2/N2 only when DRRS is possible. */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* gen4 and earlier use the per-pipe G4X registers */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}

/* Select and program the DP M/N values (M1_N1 or M2_N2) for the crtc. */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}

/* Compute the VLV DPLL control register values for this crtc state. */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
			DPLL_EXT_BUFFER_ENABLE_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}

/* Compute the CHV DPLL control register values for this crtc state. */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}

static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
u32 mdiv; 8361 u32 bestn, bestm1, bestm2, bestp1, bestp2; 8362 u32 coreclk, reg_val; 8363 8364 /* Enable Refclk */ 8365 intel_de_write(dev_priv, DPLL(pipe), 8366 pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 8367 8368 /* No need to actually set up the DPLL with DSI */ 8369 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8370 return; 8371 8372 vlv_dpio_get(dev_priv); 8373 8374 bestn = pipe_config->dpll.n; 8375 bestm1 = pipe_config->dpll.m1; 8376 bestm2 = pipe_config->dpll.m2; 8377 bestp1 = pipe_config->dpll.p1; 8378 bestp2 = pipe_config->dpll.p2; 8379 8380 /* See eDP HDMI DPIO driver vbios notes doc */ 8381 8382 /* PLL B needs special handling */ 8383 if (pipe == PIPE_B) 8384 vlv_pllb_recal_opamp(dev_priv, pipe); 8385 8386 /* Set up Tx target for periodic Rcomp update */ 8387 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 8388 8389 /* Disable target IRef on PLL */ 8390 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 8391 reg_val &= 0x00ffffff; 8392 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 8393 8394 /* Disable fast lock */ 8395 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 8396 8397 /* Set idtafcrecal before PLL is enabled */ 8398 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 8399 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 8400 mdiv |= ((bestn << DPIO_N_SHIFT)); 8401 mdiv |= (1 << DPIO_K_SHIFT); 8402 8403 /* 8404 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 8405 * but we don't support that). 8406 * Note: don't use the DAC post divider as it seems unstable. 
8407 */ 8408 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 8409 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 8410 8411 mdiv |= DPIO_ENABLE_CALIBRATION; 8412 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 8413 8414 /* Set HBR and RBR LPF coefficients */ 8415 if (pipe_config->port_clock == 162000 || 8416 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || 8417 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) 8418 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 8419 0x009f0003); 8420 else 8421 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 8422 0x00d0000f); 8423 8424 if (intel_crtc_has_dp_encoder(pipe_config)) { 8425 /* Use SSC source */ 8426 if (pipe == PIPE_A) 8427 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8428 0x0df40000); 8429 else 8430 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8431 0x0df70000); 8432 } else { /* HDMI or VGA */ 8433 /* Use bend source */ 8434 if (pipe == PIPE_A) 8435 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8436 0x0df70000); 8437 else 8438 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 8439 0x0df40000); 8440 } 8441 8442 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 8443 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 8444 if (intel_crtc_has_dp_encoder(pipe_config)) 8445 coreclk |= 0x01000000; 8446 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 8447 8448 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 8449 8450 vlv_dpio_put(dev_priv); 8451 } 8452 8453 static void chv_prepare_pll(struct intel_crtc *crtc, 8454 const struct intel_crtc_state *pipe_config) 8455 { 8456 struct drm_device *dev = crtc->base.dev; 8457 struct drm_i915_private *dev_priv = to_i915(dev); 8458 enum pipe pipe = crtc->pipe; 8459 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8460 u32 loopfilter, tribuf_calcntr; 8461 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 8462 u32 dpio_val; 8463 int vco; 8464 8465 /* Enable Refclk and SSC */ 8466 
intel_de_write(dev_priv, DPLL(pipe), 8467 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 8468 8469 /* No need to actually set up the DPLL with DSI */ 8470 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8471 return; 8472 8473 bestn = pipe_config->dpll.n; 8474 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 8475 bestm1 = pipe_config->dpll.m1; 8476 bestm2 = pipe_config->dpll.m2 >> 22; 8477 bestp1 = pipe_config->dpll.p1; 8478 bestp2 = pipe_config->dpll.p2; 8479 vco = pipe_config->dpll.vco; 8480 dpio_val = 0; 8481 loopfilter = 0; 8482 8483 vlv_dpio_get(dev_priv); 8484 8485 /* p1 and p2 divider */ 8486 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 8487 5 << DPIO_CHV_S1_DIV_SHIFT | 8488 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 8489 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 8490 1 << DPIO_CHV_K_DIV_SHIFT); 8491 8492 /* Feedback post-divider - m2 */ 8493 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 8494 8495 /* Feedback refclk divider - n and m1 */ 8496 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 8497 DPIO_CHV_M1_DIV_BY_2 | 8498 1 << DPIO_CHV_N_DIV_SHIFT); 8499 8500 /* M2 fraction division */ 8501 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 8502 8503 /* M2 fraction division enable */ 8504 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8505 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 8506 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 8507 if (bestm2_frac) 8508 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 8509 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 8510 8511 /* Program digital lock detect threshold */ 8512 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 8513 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 8514 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 8515 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 8516 if (!bestm2_frac) 8517 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 8518 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 8519 8520 
/* Loop filter */ 8521 if (vco == 5400000) { 8522 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 8523 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 8524 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 8525 tribuf_calcntr = 0x9; 8526 } else if (vco <= 6200000) { 8527 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 8528 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 8529 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8530 tribuf_calcntr = 0x9; 8531 } else if (vco <= 6480000) { 8532 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 8533 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 8534 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8535 tribuf_calcntr = 0x8; 8536 } else { 8537 /* Not supported. Apply the same limits as in the max case */ 8538 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 8539 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 8540 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 8541 tribuf_calcntr = 0; 8542 } 8543 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 8544 8545 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 8546 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 8547 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 8548 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 8549 8550 /* AFC Recal */ 8551 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 8552 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 8553 DPIO_AFC_RECAL); 8554 8555 vlv_dpio_put(dev_priv); 8556 } 8557 8558 /** 8559 * vlv_force_pll_on - forcibly enable just the PLL 8560 * @dev_priv: i915 private structure 8561 * @pipe: pipe PLL to enable 8562 * @dpll: PLL configuration 8563 * 8564 * Enable the PLL for @pipe using the supplied @dpll config. To be used 8565 * in cases where we need the PLL enabled even when @pipe is not going to 8566 * be enabled. 
8567 */ 8568 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, 8569 const struct dpll *dpll) 8570 { 8571 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 8572 struct intel_crtc_state *pipe_config; 8573 8574 pipe_config = intel_crtc_state_alloc(crtc); 8575 if (!pipe_config) 8576 return -ENOMEM; 8577 8578 pipe_config->cpu_transcoder = (enum transcoder)pipe; 8579 pipe_config->pixel_multiplier = 1; 8580 pipe_config->dpll = *dpll; 8581 8582 if (IS_CHERRYVIEW(dev_priv)) { 8583 chv_compute_dpll(crtc, pipe_config); 8584 chv_prepare_pll(crtc, pipe_config); 8585 chv_enable_pll(crtc, pipe_config); 8586 } else { 8587 vlv_compute_dpll(crtc, pipe_config); 8588 vlv_prepare_pll(crtc, pipe_config); 8589 vlv_enable_pll(crtc, pipe_config); 8590 } 8591 8592 kfree(pipe_config); 8593 8594 return 0; 8595 } 8596 8597 /** 8598 * vlv_force_pll_off - forcibly disable just the PLL 8599 * @dev_priv: i915 private structure 8600 * @pipe: pipe PLL to disable 8601 * 8602 * Disable the PLL for @pipe. To be used in cases where we need 8603 * the PLL enabled even when @pipe is not going to be enabled. 
8604 */ 8605 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) 8606 { 8607 if (IS_CHERRYVIEW(dev_priv)) 8608 chv_disable_pll(dev_priv, pipe); 8609 else 8610 vlv_disable_pll(dev_priv, pipe); 8611 } 8612 8613 static void i9xx_compute_dpll(struct intel_crtc *crtc, 8614 struct intel_crtc_state *crtc_state, 8615 struct dpll *reduced_clock) 8616 { 8617 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8618 u32 dpll; 8619 struct dpll *clock = &crtc_state->dpll; 8620 8621 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8622 8623 dpll = DPLL_VGA_MODE_DIS; 8624 8625 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 8626 dpll |= DPLLB_MODE_LVDS; 8627 else 8628 dpll |= DPLLB_MODE_DAC_SERIAL; 8629 8630 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 8631 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 8632 dpll |= (crtc_state->pixel_multiplier - 1) 8633 << SDVO_MULTIPLIER_SHIFT_HIRES; 8634 } 8635 8636 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 8637 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 8638 dpll |= DPLL_SDVO_HIGH_SPEED; 8639 8640 if (intel_crtc_has_dp_encoder(crtc_state)) 8641 dpll |= DPLL_SDVO_HIGH_SPEED; 8642 8643 /* compute bitmask from p1 value */ 8644 if (IS_PINEVIEW(dev_priv)) 8645 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 8646 else { 8647 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8648 if (IS_G4X(dev_priv) && reduced_clock) 8649 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8650 } 8651 switch (clock->p2) { 8652 case 5: 8653 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8654 break; 8655 case 7: 8656 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8657 break; 8658 case 10: 8659 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8660 break; 8661 case 14: 8662 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8663 break; 8664 } 8665 if (INTEL_GEN(dev_priv) >= 4) 8666 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 8667 8668 if (crtc_state->sdvo_tv_clock) 8669 dpll |= 
PLL_REF_INPUT_TVCLKINBC; 8670 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8671 intel_panel_use_ssc(dev_priv)) 8672 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8673 else 8674 dpll |= PLL_REF_INPUT_DREFCLK; 8675 8676 dpll |= DPLL_VCO_ENABLE; 8677 crtc_state->dpll_hw_state.dpll = dpll; 8678 8679 if (INTEL_GEN(dev_priv) >= 4) { 8680 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 8681 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 8682 crtc_state->dpll_hw_state.dpll_md = dpll_md; 8683 } 8684 } 8685 8686 static void i8xx_compute_dpll(struct intel_crtc *crtc, 8687 struct intel_crtc_state *crtc_state, 8688 struct dpll *reduced_clock) 8689 { 8690 struct drm_device *dev = crtc->base.dev; 8691 struct drm_i915_private *dev_priv = to_i915(dev); 8692 u32 dpll; 8693 struct dpll *clock = &crtc_state->dpll; 8694 8695 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 8696 8697 dpll = DPLL_VGA_MODE_DIS; 8698 8699 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8700 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8701 } else { 8702 if (clock->p1 == 2) 8703 dpll |= PLL_P1_DIVIDE_BY_TWO; 8704 else 8705 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8706 if (clock->p2 == 4) 8707 dpll |= PLL_P2_DIVIDE_BY_4; 8708 } 8709 8710 /* 8711 * Bspec: 8712 * "[Almador Errata}: For the correct operation of the muxed DVO pins 8713 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data, 8714 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock 8715 * Enable) must be set to “1” in both the DPLL A Control Register 8716 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)." 8717 * 8718 * For simplicity We simply keep both bits always enabled in 8719 * both DPLLS. The spec says we should disable the DVO 2X clock 8720 * when not needed, but this seems to work fine in practice. 
8721 */ 8722 if (IS_I830(dev_priv) || 8723 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) 8724 dpll |= DPLL_DVO_2X_MODE; 8725 8726 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8727 intel_panel_use_ssc(dev_priv)) 8728 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8729 else 8730 dpll |= PLL_REF_INPUT_DREFCLK; 8731 8732 dpll |= DPLL_VCO_ENABLE; 8733 crtc_state->dpll_hw_state.dpll = dpll; 8734 } 8735 8736 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) 8737 { 8738 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8739 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8740 enum pipe pipe = crtc->pipe; 8741 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8742 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 8743 u32 crtc_vtotal, crtc_vblank_end; 8744 int vsyncshift = 0; 8745 8746 /* We need to be careful not to changed the adjusted mode, for otherwise 8747 * the hw state checker will get angry at the mismatch. 
*/ 8748 crtc_vtotal = adjusted_mode->crtc_vtotal; 8749 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 8750 8751 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 8752 /* the chip adds 2 halflines automatically */ 8753 crtc_vtotal -= 1; 8754 crtc_vblank_end -= 1; 8755 8756 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8757 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 8758 else 8759 vsyncshift = adjusted_mode->crtc_hsync_start - 8760 adjusted_mode->crtc_htotal / 2; 8761 if (vsyncshift < 0) 8762 vsyncshift += adjusted_mode->crtc_htotal; 8763 } 8764 8765 if (INTEL_GEN(dev_priv) > 3) 8766 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder), 8767 vsyncshift); 8768 8769 intel_de_write(dev_priv, HTOTAL(cpu_transcoder), 8770 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); 8771 intel_de_write(dev_priv, HBLANK(cpu_transcoder), 8772 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); 8773 intel_de_write(dev_priv, HSYNC(cpu_transcoder), 8774 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); 8775 8776 intel_de_write(dev_priv, VTOTAL(cpu_transcoder), 8777 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16)); 8778 intel_de_write(dev_priv, VBLANK(cpu_transcoder), 8779 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16)); 8780 intel_de_write(dev_priv, VSYNC(cpu_transcoder), 8781 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); 8782 8783 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 8784 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 8785 * documented on the DDI_FUNC_CTL register description, EDP Input Select 8786 * bits. 
*/ 8787 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 8788 (pipe == PIPE_B || pipe == PIPE_C)) 8789 intel_de_write(dev_priv, VTOTAL(pipe), 8790 intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); 8791 8792 } 8793 8794 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 8795 { 8796 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8797 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8798 enum pipe pipe = crtc->pipe; 8799 8800 /* pipesrc controls the size that is scaled from, which should 8801 * always be the user's requested size. 8802 */ 8803 intel_de_write(dev_priv, PIPESRC(pipe), 8804 ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1)); 8805 } 8806 8807 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 8808 { 8809 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 8810 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 8811 8812 if (IS_GEN(dev_priv, 2)) 8813 return false; 8814 8815 if (INTEL_GEN(dev_priv) >= 9 || 8816 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 8817 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; 8818 else 8819 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; 8820 } 8821 8822 static void intel_get_pipe_timings(struct intel_crtc *crtc, 8823 struct intel_crtc_state *pipe_config) 8824 { 8825 struct drm_device *dev = crtc->base.dev; 8826 struct drm_i915_private *dev_priv = to_i915(dev); 8827 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 8828 u32 tmp; 8829 8830 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder)); 8831 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 8832 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 8833 8834 if (!transcoder_is_dsi(cpu_transcoder)) { 8835 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder)); 8836 
pipe_config->hw.adjusted_mode.crtc_hblank_start = 8837 (tmp & 0xffff) + 1; 8838 pipe_config->hw.adjusted_mode.crtc_hblank_end = 8839 ((tmp >> 16) & 0xffff) + 1; 8840 } 8841 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder)); 8842 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 8843 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 8844 8845 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder)); 8846 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 8847 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 8848 8849 if (!transcoder_is_dsi(cpu_transcoder)) { 8850 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder)); 8851 pipe_config->hw.adjusted_mode.crtc_vblank_start = 8852 (tmp & 0xffff) + 1; 8853 pipe_config->hw.adjusted_mode.crtc_vblank_end = 8854 ((tmp >> 16) & 0xffff) + 1; 8855 } 8856 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder)); 8857 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 8858 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 8859 8860 if (intel_pipe_is_interlaced(pipe_config)) { 8861 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 8862 pipe_config->hw.adjusted_mode.crtc_vtotal += 1; 8863 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1; 8864 } 8865 } 8866 8867 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 8868 struct intel_crtc_state *pipe_config) 8869 { 8870 struct drm_device *dev = crtc->base.dev; 8871 struct drm_i915_private *dev_priv = to_i915(dev); 8872 u32 tmp; 8873 8874 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe)); 8875 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 8876 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 8877 8878 pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h; 8879 pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w; 8880 } 8881 8882 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 8883 struct intel_crtc_state 
*pipe_config) 8884 { 8885 mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay; 8886 mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal; 8887 mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start; 8888 mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end; 8889 8890 mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay; 8891 mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal; 8892 mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start; 8893 mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end; 8894 8895 mode->flags = pipe_config->hw.adjusted_mode.flags; 8896 mode->type = DRM_MODE_TYPE_DRIVER; 8897 8898 mode->clock = pipe_config->hw.adjusted_mode.crtc_clock; 8899 8900 mode->vrefresh = drm_mode_vrefresh(mode); 8901 drm_mode_set_name(mode); 8902 } 8903 8904 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 8905 { 8906 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 8907 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8908 u32 pipeconf; 8909 8910 pipeconf = 0; 8911 8912 /* we keep both pipes enabled on 830 */ 8913 if (IS_I830(dev_priv)) 8914 pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; 8915 8916 if (crtc_state->double_wide) 8917 pipeconf |= PIPECONF_DOUBLE_WIDE; 8918 8919 /* only g4x and later have fancy bpc/dither controls */ 8920 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 8921 IS_CHERRYVIEW(dev_priv)) { 8922 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 8923 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 8924 pipeconf |= PIPECONF_DITHER_EN | 8925 PIPECONF_DITHER_TYPE_SP; 8926 8927 switch (crtc_state->pipe_bpp) { 8928 case 18: 8929 pipeconf |= PIPECONF_6BPC; 8930 break; 8931 case 24: 8932 pipeconf |= PIPECONF_8BPC; 8933 break; 8934 case 30: 8935 pipeconf |= PIPECONF_10BPC; 8936 break; 8937 default: 8938 /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ 8939 BUG(); 8940 } 8941 } 8942 8943 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 8944 if (INTEL_GEN(dev_priv) < 4 || 8945 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 8946 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 8947 else 8948 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 8949 } else { 8950 pipeconf |= PIPECONF_PROGRESSIVE; 8951 } 8952 8953 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 8954 crtc_state->limited_color_range) 8955 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 8956 8957 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 8958 8959 pipeconf |= PIPECONF_FRAME_START_DELAY(0); 8960 8961 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf); 8962 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe)); 8963 } 8964 8965 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 8966 struct intel_crtc_state *crtc_state) 8967 { 8968 struct drm_device *dev = crtc->base.dev; 8969 struct drm_i915_private *dev_priv = to_i915(dev); 8970 const struct intel_limit *limit; 8971 int refclk = 48000; 8972 8973 memset(&crtc_state->dpll_hw_state, 0, 8974 sizeof(crtc_state->dpll_hw_state)); 8975 8976 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8977 if (intel_panel_use_ssc(dev_priv)) { 8978 refclk = dev_priv->vbt.lvds_ssc_freq; 8979 drm_dbg_kms(&dev_priv->drm, 8980 "using SSC reference clock of %d kHz\n", 8981 refclk); 8982 } 8983 8984 limit = &intel_limits_i8xx_lvds; 8985 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 8986 limit = &intel_limits_i8xx_dvo; 8987 } else { 8988 limit = &intel_limits_i8xx_dac; 8989 } 8990 8991 if (!crtc_state->clock_set && 8992 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8993 refclk, NULL, &crtc_state->dpll)) { 8994 drm_err(&dev_priv->drm, 8995 "Couldn't find PLL settings for mode!\n"); 8996 return -EINVAL; 8997 } 8998 8999 i8xx_compute_dpll(crtc, crtc_state, NULL); 9000 9001 return 0; 9002 } 9003 9004 static int 
g4x_crtc_compute_clock(struct intel_crtc *crtc, 9005 struct intel_crtc_state *crtc_state) 9006 { 9007 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9008 const struct intel_limit *limit; 9009 int refclk = 96000; 9010 9011 memset(&crtc_state->dpll_hw_state, 0, 9012 sizeof(crtc_state->dpll_hw_state)); 9013 9014 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9015 if (intel_panel_use_ssc(dev_priv)) { 9016 refclk = dev_priv->vbt.lvds_ssc_freq; 9017 drm_dbg_kms(&dev_priv->drm, 9018 "using SSC reference clock of %d kHz\n", 9019 refclk); 9020 } 9021 9022 if (intel_is_dual_link_lvds(dev_priv)) 9023 limit = &intel_limits_g4x_dual_channel_lvds; 9024 else 9025 limit = &intel_limits_g4x_single_channel_lvds; 9026 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 9027 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 9028 limit = &intel_limits_g4x_hdmi; 9029 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 9030 limit = &intel_limits_g4x_sdvo; 9031 } else { 9032 /* The option is for other outputs */ 9033 limit = &intel_limits_i9xx_sdvo; 9034 } 9035 9036 if (!crtc_state->clock_set && 9037 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9038 refclk, NULL, &crtc_state->dpll)) { 9039 drm_err(&dev_priv->drm, 9040 "Couldn't find PLL settings for mode!\n"); 9041 return -EINVAL; 9042 } 9043 9044 i9xx_compute_dpll(crtc, crtc_state, NULL); 9045 9046 return 0; 9047 } 9048 9049 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 9050 struct intel_crtc_state *crtc_state) 9051 { 9052 struct drm_device *dev = crtc->base.dev; 9053 struct drm_i915_private *dev_priv = to_i915(dev); 9054 const struct intel_limit *limit; 9055 int refclk = 96000; 9056 9057 memset(&crtc_state->dpll_hw_state, 0, 9058 sizeof(crtc_state->dpll_hw_state)); 9059 9060 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9061 if (intel_panel_use_ssc(dev_priv)) { 9062 refclk = dev_priv->vbt.lvds_ssc_freq; 9063 drm_dbg_kms(&dev_priv->drm, 
9064 "using SSC reference clock of %d kHz\n", 9065 refclk); 9066 } 9067 9068 limit = &pnv_limits_lvds; 9069 } else { 9070 limit = &pnv_limits_sdvo; 9071 } 9072 9073 if (!crtc_state->clock_set && 9074 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9075 refclk, NULL, &crtc_state->dpll)) { 9076 drm_err(&dev_priv->drm, 9077 "Couldn't find PLL settings for mode!\n"); 9078 return -EINVAL; 9079 } 9080 9081 i9xx_compute_dpll(crtc, crtc_state, NULL); 9082 9083 return 0; 9084 } 9085 9086 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 9087 struct intel_crtc_state *crtc_state) 9088 { 9089 struct drm_device *dev = crtc->base.dev; 9090 struct drm_i915_private *dev_priv = to_i915(dev); 9091 const struct intel_limit *limit; 9092 int refclk = 96000; 9093 9094 memset(&crtc_state->dpll_hw_state, 0, 9095 sizeof(crtc_state->dpll_hw_state)); 9096 9097 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9098 if (intel_panel_use_ssc(dev_priv)) { 9099 refclk = dev_priv->vbt.lvds_ssc_freq; 9100 drm_dbg_kms(&dev_priv->drm, 9101 "using SSC reference clock of %d kHz\n", 9102 refclk); 9103 } 9104 9105 limit = &intel_limits_i9xx_lvds; 9106 } else { 9107 limit = &intel_limits_i9xx_sdvo; 9108 } 9109 9110 if (!crtc_state->clock_set && 9111 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9112 refclk, NULL, &crtc_state->dpll)) { 9113 drm_err(&dev_priv->drm, 9114 "Couldn't find PLL settings for mode!\n"); 9115 return -EINVAL; 9116 } 9117 9118 i9xx_compute_dpll(crtc, crtc_state, NULL); 9119 9120 return 0; 9121 } 9122 9123 static int chv_crtc_compute_clock(struct intel_crtc *crtc, 9124 struct intel_crtc_state *crtc_state) 9125 { 9126 int refclk = 100000; 9127 const struct intel_limit *limit = &intel_limits_chv; 9128 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 9129 9130 memset(&crtc_state->dpll_hw_state, 0, 9131 sizeof(crtc_state->dpll_hw_state)); 9132 9133 if (!crtc_state->clock_set && 9134 !chv_find_best_dpll(limit, crtc_state, 
				    /* Tail of chv_crtc_compute_clock(): the head of this
				     * function lies before this chunk. Fails the modeset if
				     * no CHV PLL divider set matches the requested port clock.
				     */
				    crtc_state->port_clock,
				    refclk, NULL, &crtc_state->dpll)) {
		drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	chv_compute_dpll(crtc, crtc_state);

	return 0;
}

/*
 * Compute VLV DPLL dividers for @crtc_state.
 *
 * Picks dividers for the fixed 100 MHz VLV reference clock (unless
 * userspace already forced a clock via @crtc_state->clock_set) and
 * fills @crtc_state->dpll_hw_state via vlv_compute_dpll().
 *
 * Returns 0 on success, -EINVAL if no suitable dividers exist.
 */
static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000; /* VLV display PLL reference, kHz */
	const struct intel_limit *limit = &intel_limits_vlv;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	/* Start from a clean slate; only the bits we set below are valid. */
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	vlv_compute_dpll(crtc, crtc_state);

	return 0;
}

/*
 * Does this platform have a (single, shared) panel fitter on the
 * gmch display path? i830 has none; gen4+, Pineview and mobile
 * parts do.
 */
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return INTEL_GEN(dev_priv) >= 4 ||
		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/*
 * Read out the gmch panel fitter state into @crtc_state.
 *
 * Only records the pfit registers if the fitter is enabled and
 * attached to this crtc's pipe (pre-gen4 hardware hardwires the
 * fitter to pipe B; gen4+ has a pipe-select field in PFIT_CONTROL).
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}

/*
 * Read back the VLV DPLL dividers via the DPIO sideband and compute
 * the resulting port clock for hardware state readout.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000; /* kHz, must match vlv_crtc_compute_clock() */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* Unpack m1/m2/n/p1/p2 from the single PLL_DW3 dword. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}

/*
 * Reconstruct a framebuffer description from the primary plane
 * registers for BIOS framebuffer takeover (gen2-4 style plane
 * registers; also handles HSW/BDW DSPOFFSET layout).
 *
 * On success @plane_config->fb points at a freshly allocated
 * intel_framebuffer describing what the plane is currently scanning
 * out; the caller owns (and must eventually release) it. Silently
 * returns if the plane is disabled or allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B primary plane additionally supports horizontal mirroring */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/*
	 * NOTE(review): 'offset' is read back below but never used
	 * afterwards in this function - only 'base' feeds plane_config.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = intel_de_read(dev_priv,
					       DSPTILEOFF(i9xx_plane));
		else
			offset = intel_de_read(dev_priv,
					       DSPLINOFF(i9xx_plane));
		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* PIPESRC holds (width - 1) << 16 | (height - 1) */
	val = intel_de_read(dev_priv, PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
}

/*
 * Read back the CHV DPLL dividers via the DPIO sideband and compute
 * the resulting port clock for hardware state readout.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000; /* kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* m2 is a 22.22 fixed point value: integer part in DW0, fraction in DW2 */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}

/*
 * Decode the pipe output format (RGB/YCbCr444/YCbCr420) from the
 * PIPEMISC register for hardware state readout on BDW+.
 */
static enum intel_output_format
bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	if (tmp & PIPEMISC_YUV420_ENABLE) {
		/* We support 4:2:0 in full blend mode only */
		drm_WARN_ON(&dev_priv->drm,
			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);

		return INTEL_OUTPUT_FORMAT_YCBCR420;
	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	} else {
		return INTEL_OUTPUT_FORMAT_RGB;
	}
}

/*
 * Read out the gamma/CSC enable bits from the primary plane control
 * register into @crtc_state (these live in DSPCNTR on this hardware).
 */
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 tmp;

	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	if (tmp & DISPPLANE_GAMMA_ENABLE)
		crtc_state->gamma_enable = true;

	/* Pipe CSC only exists on non-GMCH platforms */
	if (!HAS_GMCH(dev_priv) &&
	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
		crtc_state->csc_enable = true;
}

/*
 * Full hardware state readout for a gmch (gen2-4/VLV/CHV) pipe.
 *
 * Fills @pipe_config from the hardware registers under a display
 * power reference. Returns true if the pipe is enabled and the state
 * was read out, false if the power well is off or the pipe disabled.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Don't touch the registers if the pipe's power domain is off. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

/*
 * Configure the IBX/CPT PCH display reference clock (PCH_DREF_CONTROL)
 * according to the connected outputs (LVDS panel / CPU eDP) and the
 * SSC (spread spectrum clocking) policy. The sources must be switched
 * one at a time with settle delays, so the desired final value is
 * computed first and the function returns early if nothing changes.
 * (Continues in the next chunk section.)
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			/* An enabled PLL already consumes the SSC source;
			 * we must not turn it off underneath it. */
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The step-by-step sequence above must land exactly on 'final'. */
	BUG_ON(val != final);
}

/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset
 * control bit, wait (up to 100 us) for the status bit to latch,
 * then de-assert and wait for it to clear.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw
 *
 * Program the FDI mPHY tuning registers over the SBI sideband.
 * The paired 0x2xxx/0x21xx offsets are the per-lane/per-channel
 * copies of the same fields; the magic values come from the
 * workaround programming sequence.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize impossible combinations rather than programming them. */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* PATHALT must be set before the SSC can be disabled. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

/* Map a bend amount in steps (-50..50, multiples of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SSCDIVINTPHASE values per bend step, indexed via BEND_IDX(). */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	/* Only multiples of 5 within table range are valid. */
	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/* Odd multiples of 5 need the dither phase pattern enabled. */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX

/*
 * Is the SPLL currently enabled and referencing the PCH SSC clock?
 * Used to decide whether the PCH SSC source may be turned off.
 */
static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);

	if ((ctl & SPLL_PLL_ENABLE) == 0)
		return false;

	/* Muxed-SSC reference resolves to the PCH SSC when the CPU SSC
	 * fuse is disabled. */
	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	if (IS_BROADWELL(dev_priv) &&
	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
		return true;

	return false;
}

/*
 * Is the given WRPLL currently enabled and referencing the PCH SSC
 * clock? Same purpose as spll_uses_pch_ssc() but for WRPLL1/WRPLL2.
 */
static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
			       enum intel_dpll_id id)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));

	if ((ctl & WRPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
		return true;

	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	return false;
}

/*
 * Initialize the LPT PCH reference clocks (CLKOUT_DP): enable it with
 * spread and FDI programming when a CRT/FDI output is present,
 * otherwise disable it - but only if no active PLL is still consuming
 * the PCH SSC reference.
 */
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	/* Analog (CRT) is the only output that needs FDI on LPT. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	/* A PLL still relies on the PCH SSC - leave everything alone. */
	if (dev_priv->pch_ssc_use)
		return;

	if (has_fdi) {
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}

/*
 * Program PIPECONF for an ILK-style (PCH) pipe from @crtc_state:
 * bpc, dithering, interlace mode, color range, output colorspace,
 * gamma mode and frame start delay.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}

/*
 * Program PIPECONF for a HSW+ transcoder. Only dithering (HSW),
 * interlace and (HSW) output colorspace live here; bpc moved to
 * PIPEMISC on BDW+.
 */
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	if (IS_HASWELL(dev_priv) && crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (IS_HASWELL(dev_priv) &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
}

/*
 * Program PIPEMISC for BDW+ pipes: dithering bpc/type, output
 * colorspace (4:4:4 / 4:2:0 full blend), HDR precision mode (gen11+
 * when only HDR-capable planes are active) and pixel rounding (gen12+).
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

/* Does the effective M value undershoot the CB-tune threshold? */
static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

/*
 * Build the ILK DPLL control and FP register values for @crtc_state
 * (and an optional @reduced_clock for downclocking) and store them in
 * @crtc_state->dpll_hw_state.
 */
static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		/* No downclocking: FP1 mirrors FP0. */
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}

/*
 * Compute the DPLL state for an ILK-style (PCH) crtc: pick the
 * divider limits based on output type and refclk, find the best
 * dividers, and reserve a shared DPLL.
 *
 * Returns 0 on success, -EINVAL if no dividers or no free PLL.
 */
static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	const struct intel_limit *limit;
	int refclk = 120000; /* default PCH reference, kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &ilk_limits_dual_lvds_100m;
			else
				limit = &ilk_limits_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &ilk_limits_single_lvds_100m;
			else
				limit = &ilk_limits_single_lvds;
		}
	} else {
		limit = &ilk_limits_dac;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ilk_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to find PLL for pipe %c\n",
			    pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}

/*
 * Read out the PCH transcoder link M1/N1 and data M1/N1 values
 * (TU size is packed into the top bits of the data M register).
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

/*
 * Read out the CPU transcoder M/N values into @m_n (and the M2/N2
 * set into @m2_n2 where the transcoder has one). Pre-gen5 hardware
 * uses the per-pipe G4X register layout instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
						      PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
						      PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
						      PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

/* Head of intel_dp_get_m_n(): the body continues past this chunk. */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if
	(pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}

/* Read back the FDI M/N values from the CPU transcoder. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}

/*
 * Unpack a panel fitter window position/size register pair (x/y and
 * w/h packed as 16:16) into the pch_pfit destination rectangle.
 */
static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
				  u32 pos, u32 size)
{
	drm_rect_init(&crtc_state->pch_pfit.dst,
		      pos >> 16, pos & 0xffff,
		      size >> 16, size & 0xffff);
}

/*
 * Read back the SKL+ pipe scaler (panel fitter) state: find the first
 * enabled scaler bound to this pipe, record its window, and update the
 * scaler bookkeeping in crtc_state->scaler_state.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		/* Only a scaler enabled for the pipe (not a plane) counts */
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}

/*
 * Reconstruct the initial (BIOS-programmed) primary plane framebuffer
 * configuration on SKL+ by reading back the plane control, surface,
 * size and stride registers. On success plane_config->fb points at a
 * freshly allocated intel_framebuffer; on failure it is left untouched.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));

	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode moved to PLANE_COLOR_CTL on GLK/gen10+ */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = intel_de_read(dev_priv,
				      PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* Surface address is 4KiB aligned; mask off the low bits */
	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));

	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}

/*
 * Read back the ILK-family panel fitter state for this pipe. On IVB/HSW
 * the fitter carries a pipe select field which must match our pipe.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}

/*
 * Read back the full pipe configuration on ILK-family hardware.
 * Returns true (and fills @pipe_config) if the pipe is enabled,
 * false otherwise. Takes/releases the pipe power domain around
 * the register accesses.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll,
								 &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ilk_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

/*
 * Reserve a shared DPLL for a HSW+ crtc. DSI (pre-gen11) handles its
 * own PLL. Returns 0 on success, -EINVAL if no PLL could be reserved.
 */
static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	if
	(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
	    INTEL_GEN(dev_priv) >= 11) {
		struct intel_encoder *encoder =
			intel_get_crtc_new_encoder(state, crtc_state);

		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
			drm_dbg_kms(&dev_priv->drm,
				    "failed to find PLL for pipe %c\n",
				    pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * CNL: derive the shared DPLL driving @port from DPCLKA_CFGCR0 and
 * record it in the pipe config.
 */
static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

/*
 * ICL+: work out which PLL feeds @port. Combo PHYs use DPCLKA_CFGCR0;
 * Type-C PHYs select between the MG PHY PLL and the TBT PLL via
 * DDI_CLK_SEL. The result is stored in icl_port_dplls[] and made the
 * active port DPLL.
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
			ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv, id);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}

/* BXT/GLK: the port->PLL mapping is fixed (A->0, B->1, C->2). */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
			    enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;

	switch (port) {
	case PORT_A:
		id = DPLL_ID_SKL_DPLL0;
		break;
	case PORT_B:
		id = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_C:
		id = DPLL_ID_SKL_DPLL2;
		break;
	default:
		drm_err(&dev_priv->drm, "Incorrect port type\n");
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

/* SKL: decode the per-port clock select field of DPLL_CTRL2. */
static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	id = temp >> (port * 3 + 1);

	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

/* HSW/BDW: map the PORT_CLK_SEL register value to a shared DPLL id. */
static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

/*
 * Determine which CPU transcoder (including eDP/DSI panel transcoders)
 * drives this crtc, grab the transcoder's power domain (recorded in
 * @power_domain_mask / @wakerefs for the caller to release) and return
 * whether the transcoder's pipe is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask,
				     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = 0;
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}

/*
 * BXT DSI: figure out whether a DSI transcoder drives this crtc.
 * Grabs the transcoder power domains it probes (recorded for the caller
 * to release) and returns true when a DSI transcoder was selected.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask,
					 intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum
	transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

/*
 * Read back which DDI port (and hence which PLL) drives this crtc's
 * transcoder, verify the PLL's hardware state, and detect the PCH
 * (FDI) encoder case on pre-gen9.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
			PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		drm_WARN_ON(&dev_priv->drm,
			    !pll->info->funcs->get_hw_state(dev_priv, pll,
							    &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected
	 * to DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}

/*
 * Read back the full pipe configuration on HSW+ hardware. Returns true
 * (and fills @pipe_config) if the pipe is active. All power domains
 * acquired during readout are released before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;
	u32 tmp;

	pipe_config->master_transcoder = INVALID_TRANSCODER;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config,
					  &power_domain_mask, wakerefs);

	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
					 &power_domain_mask, wakerefs)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);

		/*
		 * Currently there is no interface defined to
		 * check user preference between RGB/YCBCR444
		 * or YCBCR420. So the only possible case for
		 * YCBCR444 usage is driving YCBCR420 output
		 * with LSPCON, when pipe is configured for
		 * YCBCR444 output and LSPCON takes care of
		 * downsampling it.
		 */
		pipe_config->lspcon_downsampling =
			pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* The panel fitter has its own power well; probe it separately */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wf) {
		wakerefs[power_domain] = wf;
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv,
					power_domain, wakerefs[power_domain]);

	return active;
}

/*
 * Compute the cursor surface address: physical (for platforms whose
 * cursor needs physical memory) or GGTT offset, plus the surface offset
 * within the fb.
 */
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 base;

	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
		base = sg_dma_address(obj->mm.pages->sgl);
	else
		base = intel_plane_ggtt_offset(plane_state);

	return base + plane_state->color_plane[0].offset;
}

/*
 * Pack the cursor x/y position into the CURPOS register format:
 * magnitude in the position fields, with a separate sign bit per axis
 * for negative coordinates.
 */
static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
{
	int x = plane_state->uapi.dst.x1;
	int y = plane_state->uapi.dst.y1;
	u32 pos = 0;

	if (x < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	return pos;
}

/* Is the cursor plane's size within the mode_config limits? */
static bool intel_cursor_size_ok(const struct
	intel_plane_state *plane_state)
{
	const struct drm_mode_config *config =
		&plane_state->uapi.plane->dev->mode_config;
	int width = drm_rect_width(&plane_state->uapi.dst);
	int height = drm_rect_height(&plane_state->uapi.dst);

	return width > 0 && width <= config->cursor_width &&
		height > 0 && height <= config->cursor_height;
}

/*
 * Validate and finalize the cursor surface: compute the GTT mapping,
 * reject panned cursors (src offset must be 0,0), and apply the 180°
 * rotation offset adjustment needed on GMCH platforms. Returns 0 or a
 * negative error code.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* Point at the last pixel so the HW scans out backwards */
		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}

/*
 * Atomic check for the cursor plane: reject tiled fbs, run the common
 * plane-state checks (no scaling allowed), then validate the surface
 * and source coordinates. Returns 0 or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	int ret;

	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Use the unclipped src/dst rectangles, which we program to hw */
	plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
	plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	return 0;
}

/* i845/i865 cursor plane: fixed maximum stride. */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
11384 } 11385 11386 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11387 { 11388 u32 cntl = 0; 11389 11390 if (crtc_state->gamma_enable) 11391 cntl |= CURSOR_GAMMA_ENABLE; 11392 11393 return cntl; 11394 } 11395 11396 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 11397 const struct intel_plane_state *plane_state) 11398 { 11399 return CURSOR_ENABLE | 11400 CURSOR_FORMAT_ARGB | 11401 CURSOR_STRIDE(plane_state->color_plane[0].stride); 11402 } 11403 11404 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 11405 { 11406 int width = drm_rect_width(&plane_state->uapi.dst); 11407 11408 /* 11409 * 845g/865g are only limited by the width of their cursors, 11410 * the height is arbitrary up to the precision of the register. 11411 */ 11412 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 11413 } 11414 11415 static int i845_check_cursor(struct intel_crtc_state *crtc_state, 11416 struct intel_plane_state *plane_state) 11417 { 11418 const struct drm_framebuffer *fb = plane_state->hw.fb; 11419 struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 11420 int ret; 11421 11422 ret = intel_check_cursor(crtc_state, plane_state); 11423 if (ret) 11424 return ret; 11425 11426 /* if we want to turn off the cursor ignore width and height */ 11427 if (!fb) 11428 return 0; 11429 11430 /* Check for which cursor types we support */ 11431 if (!i845_cursor_size_ok(plane_state)) { 11432 drm_dbg_kms(&i915->drm, 11433 "Cursor dimension %dx%d not supported\n", 11434 drm_rect_width(&plane_state->uapi.dst), 11435 drm_rect_height(&plane_state->uapi.dst)); 11436 return -EINVAL; 11437 } 11438 11439 drm_WARN_ON(&i915->drm, plane_state->uapi.visible && 11440 plane_state->color_plane[0].stride != fb->pitches[0]); 11441 11442 switch (fb->pitches[0]) { 11443 case 256: 11444 case 512: 11445 case 1024: 11446 case 2048: 11447 break; 11448 default: 11449 drm_dbg_kms(&i915->drm, "Invalid cursor stride 
(%u)\n", 11450 fb->pitches[0]); 11451 return -EINVAL; 11452 } 11453 11454 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 11455 11456 return 0; 11457 } 11458 11459 static void i845_update_cursor(struct intel_plane *plane, 11460 const struct intel_crtc_state *crtc_state, 11461 const struct intel_plane_state *plane_state) 11462 { 11463 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11464 u32 cntl = 0, base = 0, pos = 0, size = 0; 11465 unsigned long irqflags; 11466 11467 if (plane_state && plane_state->uapi.visible) { 11468 unsigned int width = drm_rect_width(&plane_state->uapi.dst); 11469 unsigned int height = drm_rect_height(&plane_state->uapi.dst); 11470 11471 cntl = plane_state->ctl | 11472 i845_cursor_ctl_crtc(crtc_state); 11473 11474 size = (height << 12) | width; 11475 11476 base = intel_cursor_base(plane_state); 11477 pos = intel_cursor_position(plane_state); 11478 } 11479 11480 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11481 11482 /* On these chipsets we can only modify the base/size/stride 11483 * whilst the cursor is disabled. 
11484 */ 11485 if (plane->cursor.base != base || 11486 plane->cursor.size != size || 11487 plane->cursor.cntl != cntl) { 11488 intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0); 11489 intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base); 11490 intel_de_write_fw(dev_priv, CURSIZE, size); 11491 intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); 11492 intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl); 11493 11494 plane->cursor.base = base; 11495 plane->cursor.size = size; 11496 plane->cursor.cntl = cntl; 11497 } else { 11498 intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); 11499 } 11500 11501 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11502 } 11503 11504 static void i845_disable_cursor(struct intel_plane *plane, 11505 const struct intel_crtc_state *crtc_state) 11506 { 11507 i845_update_cursor(plane, crtc_state, NULL); 11508 } 11509 11510 static bool i845_cursor_get_hw_state(struct intel_plane *plane, 11511 enum pipe *pipe) 11512 { 11513 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11514 enum intel_display_power_domain power_domain; 11515 intel_wakeref_t wakeref; 11516 bool ret; 11517 11518 power_domain = POWER_DOMAIN_PIPE(PIPE_A); 11519 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11520 if (!wakeref) 11521 return false; 11522 11523 ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE; 11524 11525 *pipe = PIPE_A; 11526 11527 intel_display_power_put(dev_priv, power_domain, wakeref); 11528 11529 return ret; 11530 } 11531 11532 static unsigned int 11533 i9xx_cursor_max_stride(struct intel_plane *plane, 11534 u32 pixel_format, u64 modifier, 11535 unsigned int rotation) 11536 { 11537 return plane->base.dev->mode_config.cursor_width * 4; 11538 } 11539 11540 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11541 { 11542 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 11543 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11544 u32 cntl = 0; 11545 11546 if 
(INTEL_GEN(dev_priv) >= 11)
		return cntl;

	if (crtc_state->gamma_enable)
		cntl = MCURSOR_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		cntl |= MCURSOR_PIPE_CSC_ENABLE;

	/* Pre-g4x/ilk hardware needs an explicit pipe select in the cursor. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);

	return cntl;
}

/*
 * Plane-derived bits of the i9xx+ cursor control register:
 * trickle feed, cursor mode (by width), and 180° rotation.
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	u32 cntl = 0;

	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;

	switch (drm_rect_width(&plane_state->uapi.dst)) {
	case 64:
		cntl |= MCURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= MCURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= MCURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
		return 0;
	}

	if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
		cntl |= MCURSOR_ROTATE_180;

	return cntl;
}

/* Size constraints for i9xx+ cursors; see the CUR_FBC_CTL note below. */
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	int width = drm_rect_width(&plane_state->uapi.dst);
	int height = drm_rect_height(&plane_state->uapi.dst);

	if (!intel_cursor_size_ok(plane_state))
		return false;

	/* Cursor width is limited to a few power-of-two sizes */
	switch (width) {
	case 256:
	case 128:
	case 64:
		break;
	default:
		return false;
	}

	/*
	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
	 * height from 8 lines up to the cursor width, when the
	 * cursor is not rotated. Everything else requires square
	 * cursors.
	 */
	if (HAS_CUR_FBC(dev_priv) &&
	    plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
		if (height < 8 || height > width)
			return false;
	} else {
		if (height != width)
			return false;
	}

	return true;
}

/* .check_plane() hook for i9xx+ cursor planes. */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		drm_dbg(&dev_priv->drm,
			"Cursor dimension %dx%d not supported\n",
			drm_rect_width(&plane_state->uapi.dst),
			drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
		    plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The stride must match the visible cursor width exactly. */
	if (fb->pitches[0] !=
	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
		drm_dbg_kms(&dev_priv->drm,
			    "Invalid cursor stride (%u) (cursor width %d)\n",
			    fb->pitches[0],
			    drm_rect_width(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}

/*
 * Program the i9xx+ cursor registers. With plane_state == NULL (or an
 * invisible plane) everything is written as 0, disabling the cursor.
 * Note plane->cursor.size caches fbc_ctl on these platforms.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursors need CUR_FBC_CTL to set the height. */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
					  fbc_ctl);
		intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
		intel_de_write_fw(dev_priv, CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; CURBASE arms the update. */
		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* Disable the i9xx+ cursor by programming an all-zero state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}

/*
 * Read back cursor enable state and the pipe the cursor is assigned
 * to (from the pipe select bits on old platforms).
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-3 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, CURCNTR(plane->pipe));

	ret = val & MCURSOR_MODE;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		*pipe = plane->pipe;
	else
		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
			MCURSOR_PIPE_SELECT_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

/* VESA 640x480x72Hz mode to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0,
		 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Allocate and initialize an intel_framebuffer wrapping @obj.
 * Returns the new framebuffer or an ERR_PTR on failure.
 */
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
			 struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
	if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}

/*
 * Detach and disable every plane currently assigned to @crtc in the
 * given atomic state (used to get a clean pipe for load detection).
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

return 0; 11850 } 11851 11852 int intel_get_load_detect_pipe(struct drm_connector *connector, 11853 struct intel_load_detect_pipe *old, 11854 struct drm_modeset_acquire_ctx *ctx) 11855 { 11856 struct intel_crtc *intel_crtc; 11857 struct intel_encoder *intel_encoder = 11858 intel_attached_encoder(to_intel_connector(connector)); 11859 struct drm_crtc *possible_crtc; 11860 struct drm_encoder *encoder = &intel_encoder->base; 11861 struct drm_crtc *crtc = NULL; 11862 struct drm_device *dev = encoder->dev; 11863 struct drm_i915_private *dev_priv = to_i915(dev); 11864 struct drm_mode_config *config = &dev->mode_config; 11865 struct drm_atomic_state *state = NULL, *restore_state = NULL; 11866 struct drm_connector_state *connector_state; 11867 struct intel_crtc_state *crtc_state; 11868 int ret, i = -1; 11869 11870 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 11871 connector->base.id, connector->name, 11872 encoder->base.id, encoder->name); 11873 11874 old->restore_state = NULL; 11875 11876 drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex)); 11877 11878 /* 11879 * Algorithm gets a little messy: 11880 * 11881 * - if the connector already has an assigned crtc, use it (but make 11882 * sure it's on first) 11883 * 11884 * - try to find the first unused crtc that can drive this connector, 11885 * and use that if we find one 11886 */ 11887 11888 /* See if we already have a CRTC for this connector */ 11889 if (connector->state->crtc) { 11890 crtc = connector->state->crtc; 11891 11892 ret = drm_modeset_lock(&crtc->mutex, ctx); 11893 if (ret) 11894 goto fail; 11895 11896 /* Make sure the crtc and connector are running */ 11897 goto found; 11898 } 11899 11900 /* Find an unused one (if possible) */ 11901 for_each_crtc(dev, possible_crtc) { 11902 i++; 11903 if (!(encoder->possible_crtcs & (1 << i))) 11904 continue; 11905 11906 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 11907 if (ret) 11908 goto fail; 11909 11910 if 
(possible_crtc->state->enable) { 11911 drm_modeset_unlock(&possible_crtc->mutex); 11912 continue; 11913 } 11914 11915 crtc = possible_crtc; 11916 break; 11917 } 11918 11919 /* 11920 * If we didn't find an unused CRTC, don't use any. 11921 */ 11922 if (!crtc) { 11923 drm_dbg_kms(&dev_priv->drm, 11924 "no pipe available for load-detect\n"); 11925 ret = -ENODEV; 11926 goto fail; 11927 } 11928 11929 found: 11930 intel_crtc = to_intel_crtc(crtc); 11931 11932 state = drm_atomic_state_alloc(dev); 11933 restore_state = drm_atomic_state_alloc(dev); 11934 if (!state || !restore_state) { 11935 ret = -ENOMEM; 11936 goto fail; 11937 } 11938 11939 state->acquire_ctx = ctx; 11940 restore_state->acquire_ctx = ctx; 11941 11942 connector_state = drm_atomic_get_connector_state(state, connector); 11943 if (IS_ERR(connector_state)) { 11944 ret = PTR_ERR(connector_state); 11945 goto fail; 11946 } 11947 11948 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 11949 if (ret) 11950 goto fail; 11951 11952 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 11953 if (IS_ERR(crtc_state)) { 11954 ret = PTR_ERR(crtc_state); 11955 goto fail; 11956 } 11957 11958 crtc_state->uapi.active = true; 11959 11960 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi, 11961 &load_detect_mode); 11962 if (ret) 11963 goto fail; 11964 11965 ret = intel_modeset_disable_planes(state, crtc); 11966 if (ret) 11967 goto fail; 11968 11969 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 11970 if (!ret) 11971 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 11972 if (!ret) 11973 ret = drm_atomic_add_affected_planes(restore_state, crtc); 11974 if (ret) { 11975 drm_dbg_kms(&dev_priv->drm, 11976 "Failed to create a copy of old state to restore: %i\n", 11977 ret); 11978 goto fail; 11979 } 11980 11981 ret = drm_atomic_commit(state); 11982 if (ret) { 11983 drm_dbg_kms(&dev_priv->drm, 11984 "failed to set mode on load-detect pipe\n"); 11985 goto 
fail; 11986 } 11987 11988 old->restore_state = restore_state; 11989 drm_atomic_state_put(state); 11990 11991 /* let the connector get through one full cycle before testing */ 11992 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 11993 return true; 11994 11995 fail: 11996 if (state) { 11997 drm_atomic_state_put(state); 11998 state = NULL; 11999 } 12000 if (restore_state) { 12001 drm_atomic_state_put(restore_state); 12002 restore_state = NULL; 12003 } 12004 12005 if (ret == -EDEADLK) 12006 return ret; 12007 12008 return false; 12009 } 12010 12011 void intel_release_load_detect_pipe(struct drm_connector *connector, 12012 struct intel_load_detect_pipe *old, 12013 struct drm_modeset_acquire_ctx *ctx) 12014 { 12015 struct intel_encoder *intel_encoder = 12016 intel_attached_encoder(to_intel_connector(connector)); 12017 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev); 12018 struct drm_encoder *encoder = &intel_encoder->base; 12019 struct drm_atomic_state *state = old->restore_state; 12020 int ret; 12021 12022 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 12023 connector->base.id, connector->name, 12024 encoder->base.id, encoder->name); 12025 12026 if (!state) 12027 return; 12028 12029 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 12030 if (ret) 12031 drm_dbg_kms(&i915->drm, 12032 "Couldn't release load detect pipe: %i\n", ret); 12033 drm_atomic_state_put(state); 12034 } 12035 12036 static int i9xx_pll_refclk(struct drm_device *dev, 12037 const struct intel_crtc_state *pipe_config) 12038 { 12039 struct drm_i915_private *dev_priv = to_i915(dev); 12040 u32 dpll = pipe_config->dpll_hw_state.dpll; 12041 12042 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 12043 return dev_priv->vbt.lvds_ssc_freq; 12044 else if (HAS_PCH_SPLIT(dev_priv)) 12045 return 120000; 12046 else if (!IS_GEN(dev_priv, 2)) 12047 return 96000; 12048 else 12049 return 48000; 12050 } 12051 12052 /* Returns the clock of the currently 
 * programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP register currently selected by the DPLL. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode the N/M1/M2 dividers; Pineview uses a different layout. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: P dividers are encoded differently, LVDS is special. */
		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
								 LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}

/*
 * Compute the dot clock (in kHz) from the link frequency and the
 * link M/N values. Returns 0 when link_n is 0 (no valid M/N).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precison if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}

/* Readout of port_clock and a derived dotclock for PCH-attached pipes. */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}

/*
 * Reset @crtc_state to a well-defined "invalid/unset" baseline:
 * zeroed, with the uapi state re-linked to @crtc and the various
 * transcoder/pipe/scaler fields set to their invalid sentinels.
 */
static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
				   struct intel_crtc *crtc)
{
	memset(crtc_state, 0, sizeof(*crtc_state));

	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);

	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
	crtc_state->master_transcoder = INVALID_TRANSCODER;
	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
	crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
	crtc_state->scaler_state.scaler_id = -1;
	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
}

/* Allocate a crtc_state already reset via intel_crtc_state_reset(). */
static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state;

	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);

	if (crtc_state)
		intel_crtc_state_reset(crtc_state, crtc);

	return crtc_state;
}

/* Returns the currently programmed mode of the given encoder.
 */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	enum pipe pipe;

	/* Nothing to report if the encoder isn't driving a pipe. */
	if (!encoder->get_hw_state(encoder, &pipe))
		return NULL;

	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	/* Caller owns the returned mode and must kfree() it. */
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(mode);
		return NULL;
	}

	if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
		kfree(crtc_state);
		kfree(mode);
		return NULL;
	}

	encoder->get_config(encoder, crtc_state);

	intel_mode_from_pipe_config(mode, crtc_state);

	kfree(crtc_state);

	return mode;
}

/* drm_crtc_funcs.destroy hook: tear down and free the CRTC. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}

/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @cur: current plane state
 * @new: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true or false.
 */
static bool intel_wm_need_update(const struct intel_plane_state *cur,
				 struct intel_plane_state *new)
{
	/* Update watermarks on tiling or size changes. */
	if (new->uapi.visible != cur->uapi.visible)
		return true;

	if (!cur->hw.fb || !new->hw.fb)
		return false;

	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
	    cur->hw.rotation != new->hw.rotation ||
	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
		return true;

	return false;
}

/* True when the plane's src (16.16 fixed point) and dst sizes differ. */
static bool needs_scaling(const struct intel_plane_state *state)
{
	int src_w = drm_rect_width(&state->uapi.src) >> 16;
	int src_h = drm_rect_height(&state->uapi.src) >> 16;
	int dst_w = drm_rect_width(&state->uapi.dst);
	int dst_h = drm_rect_height(&state->uapi.dst);

	return (src_w != dst_w || src_h != dst_h);
}

/*
 * Work out what a plane's state transition implies for the CRTC:
 * scaler setup (skl+), watermark pre/post updates, cxsr disable around
 * plane on/off, frontbuffer bits, and the IVB/ILK/SNB LP watermark
 * workaround. Returns 0 or a negative error code.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(crtc_state, plane_state);
		visible = false;
	}

	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}

/* Can encoders @a and @b share a CRTC? (cloning masks checked both ways) */
static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

/*
 * Verify @encoder can be cloned with every other encoder that the
 * atomic state assigns to @crtc.
 */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/*
 * Pull the planar (NV12) slave plane of every planar master in the
 * state into the state as well, sanity-checking the back-links.
 * Returns 0 or a negative error code.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}
/*
 * On gen11+ planar YUV (NV12 etc.) needs a second "Y" plane paired with
 * the UV master plane. Tear down all stale master/slave links, then for
 * every plane that needs planar YUV assign a free Y-capable plane as its
 * slave and copy the relevant hw parameters across. Fails with -EINVAL
 * when no free Y plane is available.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free Y-capable plane on this crtc to act as slave */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}

/*
 * Did the set of C8 planes go from empty to non-empty (or vice versa)?
 * Used to force a color management recheck when C8 planes appear or
 * disappear, since they affect the pipe gamma enable bits.
 */
static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}

/* Line time in 1/8 us units, derived from the adjusted mode's pixel clock */
static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	if (!crtc_state->hw.enable)
		return 0;

	return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				 adjusted_mode->crtc_clock);
}

/* IPS line time in 1/8 us units, derived from the logical cdclk instead */
static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_cdclk_state *cdclk_state)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	if (!crtc_state->hw.enable)
		return 0;

	return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				 cdclk_state->logical.cdclk);
}

/*
 * skl+ line time in 1/8 us units, based on the crtc's pixel rate.
 * Rounded up, and halved on BXT/GLK when IPC is enabled (WA #1135).
 */
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	u16 linetime_wm;

	if (!crtc_state->hw.enable)
		return 0;

	linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8,
				   crtc_state->pixel_rate);

	/* Display WA #1135: BXT:ALL GLK:ALL */
	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
		linetime_wm /= 2;

	return linetime_wm;
}

/*
 * Fill in crtc_state->linetime (and, on crtcs that support IPS, also
 * crtc_state->ips_linetime). May pull the cdclk state into the atomic
 * state, so can return -EDEADLK etc. from the cdclk state accessor.
 */
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (INTEL_GEN(dev_priv) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
						       cdclk_state);

	return 0;
}

/*
 * Per-crtc atomic check: compute clocks, color management, watermarks,
 * scalers, IPS and linetime for the new crtc state. Called from the
 * atomic check phase; returns 0 or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/* pre-gen5 (minus g4x): update watermarks after turning the pipe off */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		if (drm_WARN_ON(&dev_priv->drm,
				!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state. We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	return 0;
}

/*
 * Resync each connector's atomic state (best_encoder/crtc and its
 * reference) with the legacy connector->encoder pointers, adjusting the
 * connector reference counts accordingly. Used when taking over the
 * state from non-atomic paths.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* drop the reference held for the old bound crtc, if any */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

/*
 * Clamp pipe_config->pipe_bpp to the sink's max_bpc from the connector
 * state. Returns -EINVAL for an out-of-range max_bpc (< 6 or > 12).
 */
static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
		      struct intel_crtc_state *pipe_config)
{
	struct drm_connector *connector = conn_state->connector;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	const struct drm_display_info *info = &connector->display_info;
	int bpp;

	/* round max_bpc down to a supported component depth, times 3 channels */
	switch (conn_state->max_bpc) {
	case 6 ... 7:
		bpp = 6 * 3;
		break;
	case 8 ... 9:
		bpp = 8 * 3;
		break;
	case 10 ... 11:
		bpp = 10 * 3;
		break;
	case 12:
		bpp = 12 * 3;
		break;
	default:
		return -EINVAL;
	}

	if (bpp < pipe_config->pipe_bpp) {
		drm_dbg_kms(&i915->drm,
			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
			    connector->base.id, connector->name,
			    bpp, 3 * info->bpc,
			    3 * conn_state->max_requested_bpc,
			    pipe_config->pipe_bpp);

		pipe_config->pipe_bpp = bpp;
	}

	return 0;
}

/*
 * Pick the platform's max pipe bpp as the starting point, then clamp it
 * to every sink attached to this crtc in the new state.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	     IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (INTEL_GEN(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to connector max bpp */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		int ret;

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
		if (ret)
			return ret;
	}

	return 0;
}

/* Debug-dump the crtc_* timing fields of @mode */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}

/* Debug-dump a link M/N configuration, tagged with @id (e.g. "fdi", "dp m_n") */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}

/* Debug-dump an HDMI infoframe (no-op unless KMS debugging is enabled) */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
{
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}

/* Debug-dump a DP VSC SDP (no-op unless KMS debugging is enabled) */
static void
intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
		      const struct drm_dp_vsc_sdp *vsc)
{
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
}

/* Map INTEL_OUTPUT_* enum values to their names for debug output */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE

/*
 * Format the @output_types bitmask as a comma separated list of output
 * type names into @buf (at most @len bytes, always NUL terminated).
 * Warns once if any bit had no corresponding name.
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		if (r >= len)
			break;
		str += r;
		len -= r;

		output_types &= ~BIT(i);
	}

	WARN_ON_ONCE(output_types != 0);
}

static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};

/* Name of an intel_output_format, "Invalid" for out-of-range values */
static const char *output_formats(enum intel_output_format format)
{
	if (format >= ARRAY_SIZE(output_format_str))
		format = INTEL_OUTPUT_FORMAT_INVALID;
	return output_format_str[format];
}

/* Debug-dump one plane's fb, visibility, rotation, scaler and src/dst rects */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height,
		    drm_get_format_name(fb->format->format, &format_name),
		    yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}

/*
 * Debug-dump an entire crtc state: output types/format, transcoder,
 * bpp, link m/n values, infoframes, modes/timings, pfit, dpll and color
 * state, followed by the state of every plane on this crtc (when
 * @state is provided). @context tags the dump (e.g. which phase).
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				      pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): GAMUT_METADATA also dumps infoframes.drm below;
	 * presumably both packet types share the same DRM (HDR metadata)
	 * infoframe buffer — confirm against intel_hdmi.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}

/*
 * Check that no digital port is used by more than one encoder in the
 * new state, and that MST and SST/HDMI are not mixed on the same port.
 * Returns true when the configuration is valid.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}

/* Sync the hw state pieces that may change even without a full modeset */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}

/* Seed the hw crtc state from the uapi state at the start of a modeset */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}

/* Reflect the final hw crtc state back into the uapi state */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}

/*
 * Reset @crtc_state to a freshly allocated state, carrying over only
 * the uapi state and a small set of fields that must survive (scaler
 * state, dpll state, crc enable, and the wm state on vlv/chv/g4x),
 * then rebuild the hw state from the uapi state.
 * Returns 0 or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}

/*
 * Compute the full pipe configuration: baseline bpp, output types,
 * encoder config (with a single bandwidth-constrained retry), port
 * clock, crtc config and dithering. Returns 0, -EDEADLK for lock
 * contention, or another negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/* A single retry is allowed when the crtc is bandwidth constrained */
	if (ret == RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}

/*
 * Run the encoders' .compute_config_late() hooks for every connector
 * attached to this crtc in the new state.
 */
static int
intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
{
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(&state->base, connector,
					conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);
		int ret;

		if (conn_state->crtc != &crtc->base ||
		    !encoder->compute_config_late)
continue; 13425 13426 ret = encoder->compute_config_late(encoder, crtc_state, 13427 conn_state); 13428 if (ret) 13429 return ret; 13430 } 13431 13432 return 0; 13433 } 13434 13435 bool intel_fuzzy_clock_check(int clock1, int clock2) 13436 { 13437 int diff; 13438 13439 if (clock1 == clock2) 13440 return true; 13441 13442 if (!clock1 || !clock2) 13443 return false; 13444 13445 diff = abs(clock1 - clock2); 13446 13447 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 13448 return true; 13449 13450 return false; 13451 } 13452 13453 static bool 13454 intel_compare_m_n(unsigned int m, unsigned int n, 13455 unsigned int m2, unsigned int n2, 13456 bool exact) 13457 { 13458 if (m == m2 && n == n2) 13459 return true; 13460 13461 if (exact || !m || !n || !m2 || !n2) 13462 return false; 13463 13464 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 13465 13466 if (n > n2) { 13467 while (n > n2) { 13468 m2 <<= 1; 13469 n2 <<= 1; 13470 } 13471 } else if (n < n2) { 13472 while (n < n2) { 13473 m <<= 1; 13474 n <<= 1; 13475 } 13476 } 13477 13478 if (n != n2) 13479 return false; 13480 13481 return intel_fuzzy_clock_check(m, m2); 13482 } 13483 13484 static bool 13485 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 13486 const struct intel_link_m_n *m2_n2, 13487 bool exact) 13488 { 13489 return m_n->tu == m2_n2->tu && 13490 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 13491 m2_n2->gmch_m, m2_n2->gmch_n, exact) && 13492 intel_compare_m_n(m_n->link_m, m_n->link_n, 13493 m2_n2->link_m, m2_n2->link_n, exact); 13494 } 13495 13496 static bool 13497 intel_compare_infoframe(const union hdmi_infoframe *a, 13498 const union hdmi_infoframe *b) 13499 { 13500 return memcmp(a, b, sizeof(*a)) == 0; 13501 } 13502 13503 static bool 13504 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, 13505 const struct drm_dp_vsc_sdp *b) 13506 { 13507 return memcmp(a, b, sizeof(*a)) == 0; 13508 } 13509 13510 static void 13511 pipe_config_infoframe_mismatch(struct drm_i915_private 
*dev_priv, 13512 bool fastset, const char *name, 13513 const union hdmi_infoframe *a, 13514 const union hdmi_infoframe *b) 13515 { 13516 if (fastset) { 13517 if (!drm_debug_enabled(DRM_UT_KMS)) 13518 return; 13519 13520 drm_dbg_kms(&dev_priv->drm, 13521 "fastset mismatch in %s infoframe\n", name); 13522 drm_dbg_kms(&dev_priv->drm, "expected:\n"); 13523 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); 13524 drm_dbg_kms(&dev_priv->drm, "found:\n"); 13525 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); 13526 } else { 13527 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name); 13528 drm_err(&dev_priv->drm, "expected:\n"); 13529 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); 13530 drm_err(&dev_priv->drm, "found:\n"); 13531 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); 13532 } 13533 } 13534 13535 static void 13536 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv, 13537 bool fastset, const char *name, 13538 const struct drm_dp_vsc_sdp *a, 13539 const struct drm_dp_vsc_sdp *b) 13540 { 13541 if (fastset) { 13542 if (!drm_debug_enabled(DRM_UT_KMS)) 13543 return; 13544 13545 drm_dbg_kms(&dev_priv->drm, 13546 "fastset mismatch in %s dp sdp\n", name); 13547 drm_dbg_kms(&dev_priv->drm, "expected:\n"); 13548 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a); 13549 drm_dbg_kms(&dev_priv->drm, "found:\n"); 13550 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b); 13551 } else { 13552 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name); 13553 drm_err(&dev_priv->drm, "expected:\n"); 13554 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a); 13555 drm_err(&dev_priv->drm, "found:\n"); 13556 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b); 13557 } 13558 } 13559 13560 static void __printf(4, 5) 13561 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, 13562 const char *name, const char *format, ...) 
13563 { 13564 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 13565 struct va_format vaf; 13566 va_list args; 13567 13568 va_start(args, format); 13569 vaf.fmt = format; 13570 vaf.va = &args; 13571 13572 if (fastset) 13573 drm_dbg_kms(&i915->drm, 13574 "[CRTC:%d:%s] fastset mismatch in %s %pV\n", 13575 crtc->base.base.id, crtc->base.name, name, &vaf); 13576 else 13577 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n", 13578 crtc->base.base.id, crtc->base.name, name, &vaf); 13579 13580 va_end(args); 13581 } 13582 13583 static bool fastboot_enabled(struct drm_i915_private *dev_priv) 13584 { 13585 if (i915_modparams.fastboot != -1) 13586 return i915_modparams.fastboot; 13587 13588 /* Enable fastboot by default on Skylake and newer */ 13589 if (INTEL_GEN(dev_priv) >= 9) 13590 return true; 13591 13592 /* Enable fastboot by default on VLV and CHV */ 13593 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13594 return true; 13595 13596 /* Disabled by default on all others */ 13597 return false; 13598 } 13599 13600 static bool 13601 intel_pipe_config_compare(const struct intel_crtc_state *current_config, 13602 const struct intel_crtc_state *pipe_config, 13603 bool fastset) 13604 { 13605 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); 13606 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 13607 bool ret = true; 13608 u32 bp_gamma = 0; 13609 bool fixup_inherited = fastset && 13610 (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) && 13611 !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED); 13612 13613 if (fixup_inherited && !fastboot_enabled(dev_priv)) { 13614 drm_dbg_kms(&dev_priv->drm, 13615 "initial modeset and fastboot not set\n"); 13616 ret = false; 13617 } 13618 13619 #define PIPE_CONF_CHECK_X(name) do { \ 13620 if (current_config->name != pipe_config->name) { \ 13621 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13622 "(expected 0x%08x, found 0x%08x)", \ 13623 
current_config->name, \ 13624 pipe_config->name); \ 13625 ret = false; \ 13626 } \ 13627 } while (0) 13628 13629 #define PIPE_CONF_CHECK_I(name) do { \ 13630 if (current_config->name != pipe_config->name) { \ 13631 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13632 "(expected %i, found %i)", \ 13633 current_config->name, \ 13634 pipe_config->name); \ 13635 ret = false; \ 13636 } \ 13637 } while (0) 13638 13639 #define PIPE_CONF_CHECK_BOOL(name) do { \ 13640 if (current_config->name != pipe_config->name) { \ 13641 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13642 "(expected %s, found %s)", \ 13643 yesno(current_config->name), \ 13644 yesno(pipe_config->name)); \ 13645 ret = false; \ 13646 } \ 13647 } while (0) 13648 13649 /* 13650 * Checks state where we only read out the enabling, but not the entire 13651 * state itself (like full infoframes or ELD for audio). These states 13652 * require a full modeset on bootup to fix up. 13653 */ 13654 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \ 13655 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ 13656 PIPE_CONF_CHECK_BOOL(name); \ 13657 } else { \ 13658 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13659 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \ 13660 yesno(current_config->name), \ 13661 yesno(pipe_config->name)); \ 13662 ret = false; \ 13663 } \ 13664 } while (0) 13665 13666 #define PIPE_CONF_CHECK_P(name) do { \ 13667 if (current_config->name != pipe_config->name) { \ 13668 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13669 "(expected %p, found %p)", \ 13670 current_config->name, \ 13671 pipe_config->name); \ 13672 ret = false; \ 13673 } \ 13674 } while (0) 13675 13676 #define PIPE_CONF_CHECK_M_N(name) do { \ 13677 if (!intel_compare_link_m_n(¤t_config->name, \ 13678 &pipe_config->name,\ 13679 !fastset)) { \ 13680 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13681 "(expected tu 
%i gmch %i/%i link %i/%i, " \ 13682 "found tu %i, gmch %i/%i link %i/%i)", \ 13683 current_config->name.tu, \ 13684 current_config->name.gmch_m, \ 13685 current_config->name.gmch_n, \ 13686 current_config->name.link_m, \ 13687 current_config->name.link_n, \ 13688 pipe_config->name.tu, \ 13689 pipe_config->name.gmch_m, \ 13690 pipe_config->name.gmch_n, \ 13691 pipe_config->name.link_m, \ 13692 pipe_config->name.link_n); \ 13693 ret = false; \ 13694 } \ 13695 } while (0) 13696 13697 /* This is required for BDW+ where there is only one set of registers for 13698 * switching between high and low RR. 13699 * This macro can be used whenever a comparison has to be made between one 13700 * hw state and multiple sw state variables. 13701 */ 13702 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \ 13703 if (!intel_compare_link_m_n(¤t_config->name, \ 13704 &pipe_config->name, !fastset) && \ 13705 !intel_compare_link_m_n(¤t_config->alt_name, \ 13706 &pipe_config->name, !fastset)) { \ 13707 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13708 "(expected tu %i gmch %i/%i link %i/%i, " \ 13709 "or tu %i gmch %i/%i link %i/%i, " \ 13710 "found tu %i, gmch %i/%i link %i/%i)", \ 13711 current_config->name.tu, \ 13712 current_config->name.gmch_m, \ 13713 current_config->name.gmch_n, \ 13714 current_config->name.link_m, \ 13715 current_config->name.link_n, \ 13716 current_config->alt_name.tu, \ 13717 current_config->alt_name.gmch_m, \ 13718 current_config->alt_name.gmch_n, \ 13719 current_config->alt_name.link_m, \ 13720 current_config->alt_name.link_n, \ 13721 pipe_config->name.tu, \ 13722 pipe_config->name.gmch_m, \ 13723 pipe_config->name.gmch_n, \ 13724 pipe_config->name.link_m, \ 13725 pipe_config->name.link_n); \ 13726 ret = false; \ 13727 } \ 13728 } while (0) 13729 13730 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 13731 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 13732 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13733 "(%x) 
(expected %i, found %i)", \ 13734 (mask), \ 13735 current_config->name & (mask), \ 13736 pipe_config->name & (mask)); \ 13737 ret = false; \ 13738 } \ 13739 } while (0) 13740 13741 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \ 13742 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 13743 pipe_config_mismatch(fastset, crtc, __stringify(name), \ 13744 "(expected %i, found %i)", \ 13745 current_config->name, \ 13746 pipe_config->name); \ 13747 ret = false; \ 13748 } \ 13749 } while (0) 13750 13751 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ 13752 if (!intel_compare_infoframe(¤t_config->infoframes.name, \ 13753 &pipe_config->infoframes.name)) { \ 13754 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \ 13755 ¤t_config->infoframes.name, \ 13756 &pipe_config->infoframes.name); \ 13757 ret = false; \ 13758 } \ 13759 } while (0) 13760 13761 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \ 13762 if (!current_config->has_psr && !pipe_config->has_psr && \ 13763 !intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \ 13764 &pipe_config->infoframes.name)) { \ 13765 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \ 13766 ¤t_config->infoframes.name, \ 13767 &pipe_config->infoframes.name); \ 13768 ret = false; \ 13769 } \ 13770 } while (0) 13771 13772 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \ 13773 if (current_config->name1 != pipe_config->name1) { \ 13774 pipe_config_mismatch(fastset, crtc, __stringify(name1), \ 13775 "(expected %i, found %i, won't compare lut values)", \ 13776 current_config->name1, \ 13777 pipe_config->name1); \ 13778 ret = false;\ 13779 } else { \ 13780 if (!intel_color_lut_equal(current_config->name2, \ 13781 pipe_config->name2, pipe_config->name1, \ 13782 bit_precision)) { \ 13783 pipe_config_mismatch(fastset, crtc, __stringify(name2), \ 13784 "hw_state doesn't match sw_state"); \ 13785 ret = false; \ 13786 } \ 13787 } \ 13788 } while (0) 13789 13790 #define 
PIPE_CONF_QUIRK(quirk) \ 13791 ((current_config->quirks | pipe_config->quirks) & (quirk)) 13792 13793 PIPE_CONF_CHECK_I(cpu_transcoder); 13794 13795 PIPE_CONF_CHECK_BOOL(has_pch_encoder); 13796 PIPE_CONF_CHECK_I(fdi_lanes); 13797 PIPE_CONF_CHECK_M_N(fdi_m_n); 13798 13799 PIPE_CONF_CHECK_I(lane_count); 13800 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 13801 13802 if (INTEL_GEN(dev_priv) < 8) { 13803 PIPE_CONF_CHECK_M_N(dp_m_n); 13804 13805 if (current_config->has_drrs) 13806 PIPE_CONF_CHECK_M_N(dp_m2_n2); 13807 } else 13808 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 13809 13810 PIPE_CONF_CHECK_X(output_types); 13811 13812 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); 13813 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); 13814 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); 13815 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); 13816 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); 13817 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); 13818 13819 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); 13820 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); 13821 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); 13822 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); 13823 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); 13824 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); 13825 13826 PIPE_CONF_CHECK_I(pixel_multiplier); 13827 PIPE_CONF_CHECK_I(output_format); 13828 PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 13829 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 13830 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13831 PIPE_CONF_CHECK_BOOL(limited_color_range); 13832 13833 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 13834 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 13835 PIPE_CONF_CHECK_BOOL(has_infoframe); 13836 PIPE_CONF_CHECK_BOOL(fec_enable); 13837 13838 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 13839 13840 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13841 DRM_MODE_FLAG_INTERLACE); 13842 13843 if 
(!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 13844 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13845 DRM_MODE_FLAG_PHSYNC); 13846 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13847 DRM_MODE_FLAG_NHSYNC); 13848 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13849 DRM_MODE_FLAG_PVSYNC); 13850 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 13851 DRM_MODE_FLAG_NVSYNC); 13852 } 13853 13854 PIPE_CONF_CHECK_X(gmch_pfit.control); 13855 /* pfit ratios are autocomputed by the hw on gen4+ */ 13856 if (INTEL_GEN(dev_priv) < 4) 13857 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 13858 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 13859 13860 /* 13861 * Changing the EDP transcoder input mux 13862 * (A_ONOFF vs. A_ON) requires a full modeset. 13863 */ 13864 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 13865 13866 if (!fastset) { 13867 PIPE_CONF_CHECK_I(pipe_src_w); 13868 PIPE_CONF_CHECK_I(pipe_src_h); 13869 13870 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 13871 if (current_config->pch_pfit.enabled) { 13872 PIPE_CONF_CHECK_I(pch_pfit.dst.x1); 13873 PIPE_CONF_CHECK_I(pch_pfit.dst.y1); 13874 PIPE_CONF_CHECK_I(pch_pfit.dst.x2); 13875 PIPE_CONF_CHECK_I(pch_pfit.dst.y2); 13876 } 13877 13878 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 13879 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 13880 13881 PIPE_CONF_CHECK_X(gamma_mode); 13882 if (IS_CHERRYVIEW(dev_priv)) 13883 PIPE_CONF_CHECK_X(cgm_mode); 13884 else 13885 PIPE_CONF_CHECK_X(csc_mode); 13886 PIPE_CONF_CHECK_BOOL(gamma_enable); 13887 PIPE_CONF_CHECK_BOOL(csc_enable); 13888 13889 PIPE_CONF_CHECK_I(linetime); 13890 PIPE_CONF_CHECK_I(ips_linetime); 13891 13892 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); 13893 if (bp_gamma) 13894 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma); 13895 } 13896 13897 PIPE_CONF_CHECK_BOOL(double_wide); 13898 13899 PIPE_CONF_CHECK_P(shared_dpll); 13900 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 13901 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 13902 
PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 13903 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 13904 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 13905 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 13906 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 13907 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 13908 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 13909 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 13910 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 13911 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 13912 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 13913 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 13914 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 13915 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 13916 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 13917 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 13918 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 13919 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 13920 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 13921 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 13922 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 13923 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 13924 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 13925 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 13926 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 13927 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 13928 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 13929 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 13930 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 13931 13932 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 13933 PIPE_CONF_CHECK_X(dsi_pll.div); 13934 13935 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 13936 PIPE_CONF_CHECK_I(pipe_bpp); 13937 13938 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); 13939 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 13940 13941 PIPE_CONF_CHECK_I(min_voltage_level); 13942 13943 PIPE_CONF_CHECK_X(infoframes.enable); 13944 PIPE_CONF_CHECK_X(infoframes.gcp); 13945 PIPE_CONF_CHECK_INFOFRAME(avi); 13946 PIPE_CONF_CHECK_INFOFRAME(spd); 13947 PIPE_CONF_CHECK_INFOFRAME(hdmi); 13948 
PIPE_CONF_CHECK_INFOFRAME(drm); 13949 PIPE_CONF_CHECK_DP_VSC_SDP(vsc); 13950 13951 PIPE_CONF_CHECK_X(sync_mode_slaves_mask); 13952 PIPE_CONF_CHECK_I(master_transcoder); 13953 13954 PIPE_CONF_CHECK_I(dsc.compression_enable); 13955 PIPE_CONF_CHECK_I(dsc.dsc_split); 13956 PIPE_CONF_CHECK_I(dsc.compressed_bpp); 13957 13958 PIPE_CONF_CHECK_I(mst_master_transcoder); 13959 13960 #undef PIPE_CONF_CHECK_X 13961 #undef PIPE_CONF_CHECK_I 13962 #undef PIPE_CONF_CHECK_BOOL 13963 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 13964 #undef PIPE_CONF_CHECK_P 13965 #undef PIPE_CONF_CHECK_FLAGS 13966 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 13967 #undef PIPE_CONF_CHECK_COLOR_LUT 13968 #undef PIPE_CONF_QUIRK 13969 13970 return ret; 13971 } 13972 13973 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 13974 const struct intel_crtc_state *pipe_config) 13975 { 13976 if (pipe_config->has_pch_encoder) { 13977 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 13978 &pipe_config->fdi_m_n); 13979 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock; 13980 13981 /* 13982 * FDI already provided one idea for the dotclock. 13983 * Yell if the encoder disagrees. 
13984 */ 13985 drm_WARN(&dev_priv->drm, 13986 !intel_fuzzy_clock_check(fdi_dotclock, dotclock), 13987 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 13988 fdi_dotclock, dotclock); 13989 } 13990 } 13991 13992 static void verify_wm_state(struct intel_crtc *crtc, 13993 struct intel_crtc_state *new_crtc_state) 13994 { 13995 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13996 struct skl_hw_state { 13997 struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; 13998 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; 13999 struct skl_pipe_wm wm; 14000 } *hw; 14001 struct skl_pipe_wm *sw_wm; 14002 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 14003 u8 hw_enabled_slices; 14004 const enum pipe pipe = crtc->pipe; 14005 int plane, level, max_level = ilk_wm_max_level(dev_priv); 14006 14007 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active) 14008 return; 14009 14010 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 14011 if (!hw) 14012 return; 14013 14014 skl_pipe_wm_get_hw_state(crtc, &hw->wm); 14015 sw_wm = &new_crtc_state->wm.skl.optimal; 14016 14017 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); 14018 14019 hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv); 14020 14021 if (INTEL_GEN(dev_priv) >= 11 && 14022 hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask) 14023 drm_err(&dev_priv->drm, 14024 "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", 14025 dev_priv->enabled_dbuf_slices_mask, 14026 hw_enabled_slices); 14027 14028 /* planes */ 14029 for_each_universal_plane(dev_priv, pipe, plane) { 14030 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 14031 14032 hw_plane_wm = &hw->wm.planes[plane]; 14033 sw_plane_wm = &sw_wm->planes[plane]; 14034 14035 /* Watermarks */ 14036 for (level = 0; level <= max_level; level++) { 14037 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 14038 &sw_plane_wm->wm[level]) || 14039 (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level], 14040 &sw_plane_wm->sagv_wm0))) 14041 continue; 14042 14043 
drm_err(&dev_priv->drm, 14044 "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 14045 pipe_name(pipe), plane + 1, level, 14046 sw_plane_wm->wm[level].plane_en, 14047 sw_plane_wm->wm[level].plane_res_b, 14048 sw_plane_wm->wm[level].plane_res_l, 14049 hw_plane_wm->wm[level].plane_en, 14050 hw_plane_wm->wm[level].plane_res_b, 14051 hw_plane_wm->wm[level].plane_res_l); 14052 } 14053 14054 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 14055 &sw_plane_wm->trans_wm)) { 14056 drm_err(&dev_priv->drm, 14057 "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 14058 pipe_name(pipe), plane + 1, 14059 sw_plane_wm->trans_wm.plane_en, 14060 sw_plane_wm->trans_wm.plane_res_b, 14061 sw_plane_wm->trans_wm.plane_res_l, 14062 hw_plane_wm->trans_wm.plane_en, 14063 hw_plane_wm->trans_wm.plane_res_b, 14064 hw_plane_wm->trans_wm.plane_res_l); 14065 } 14066 14067 /* DDB */ 14068 hw_ddb_entry = &hw->ddb_y[plane]; 14069 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane]; 14070 14071 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 14072 drm_err(&dev_priv->drm, 14073 "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", 14074 pipe_name(pipe), plane + 1, 14075 sw_ddb_entry->start, sw_ddb_entry->end, 14076 hw_ddb_entry->start, hw_ddb_entry->end); 14077 } 14078 } 14079 14080 /* 14081 * cursor 14082 * If the cursor plane isn't active, we may not have updated it's ddb 14083 * allocation. 
In that case since the ddb allocation will be updated 14084 * once the plane becomes visible, we can skip this check 14085 */ 14086 if (1) { 14087 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 14088 14089 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR]; 14090 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; 14091 14092 /* Watermarks */ 14093 for (level = 0; level <= max_level; level++) { 14094 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 14095 &sw_plane_wm->wm[level]) || 14096 (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level], 14097 &sw_plane_wm->sagv_wm0))) 14098 continue; 14099 14100 drm_err(&dev_priv->drm, 14101 "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 14102 pipe_name(pipe), level, 14103 sw_plane_wm->wm[level].plane_en, 14104 sw_plane_wm->wm[level].plane_res_b, 14105 sw_plane_wm->wm[level].plane_res_l, 14106 hw_plane_wm->wm[level].plane_en, 14107 hw_plane_wm->wm[level].plane_res_b, 14108 hw_plane_wm->wm[level].plane_res_l); 14109 } 14110 14111 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 14112 &sw_plane_wm->trans_wm)) { 14113 drm_err(&dev_priv->drm, 14114 "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 14115 pipe_name(pipe), 14116 sw_plane_wm->trans_wm.plane_en, 14117 sw_plane_wm->trans_wm.plane_res_b, 14118 sw_plane_wm->trans_wm.plane_res_l, 14119 hw_plane_wm->trans_wm.plane_en, 14120 hw_plane_wm->trans_wm.plane_res_b, 14121 hw_plane_wm->trans_wm.plane_res_l); 14122 } 14123 14124 /* DDB */ 14125 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; 14126 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; 14127 14128 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 14129 drm_err(&dev_priv->drm, 14130 "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", 14131 pipe_name(pipe), 14132 sw_ddb_entry->start, sw_ddb_entry->end, 14133 hw_ddb_entry->start, hw_ddb_entry->end); 14134 } 14135 } 14136 14137 kfree(hw); 14138 } 14139 14140 static void 
14141 verify_connector_state(struct intel_atomic_state *state, 14142 struct intel_crtc *crtc) 14143 { 14144 struct drm_connector *connector; 14145 struct drm_connector_state *new_conn_state; 14146 int i; 14147 14148 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { 14149 struct drm_encoder *encoder = connector->encoder; 14150 struct intel_crtc_state *crtc_state = NULL; 14151 14152 if (new_conn_state->crtc != &crtc->base) 14153 continue; 14154 14155 if (crtc) 14156 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 14157 14158 intel_connector_verify_state(crtc_state, new_conn_state); 14159 14160 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 14161 "connector's atomic encoder doesn't match legacy encoder\n"); 14162 } 14163 } 14164 14165 static void 14166 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) 14167 { 14168 struct intel_encoder *encoder; 14169 struct drm_connector *connector; 14170 struct drm_connector_state *old_conn_state, *new_conn_state; 14171 int i; 14172 14173 for_each_intel_encoder(&dev_priv->drm, encoder) { 14174 bool enabled = false, found = false; 14175 enum pipe pipe; 14176 14177 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n", 14178 encoder->base.base.id, 14179 encoder->base.name); 14180 14181 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, 14182 new_conn_state, i) { 14183 if (old_conn_state->best_encoder == &encoder->base) 14184 found = true; 14185 14186 if (new_conn_state->best_encoder != &encoder->base) 14187 continue; 14188 found = enabled = true; 14189 14190 I915_STATE_WARN(new_conn_state->crtc != 14191 encoder->base.crtc, 14192 "connector's crtc doesn't match encoder crtc\n"); 14193 } 14194 14195 if (!found) 14196 continue; 14197 14198 I915_STATE_WARN(!!encoder->base.crtc != enabled, 14199 "encoder's enabled state mismatch " 14200 "(expected %i, found %i)\n", 14201 !!encoder->base.crtc, enabled); 14202 14203 if 
(!encoder->base.crtc) { 14204 bool active; 14205 14206 active = encoder->get_hw_state(encoder, &pipe); 14207 I915_STATE_WARN(active, 14208 "encoder detached but still enabled on pipe %c.\n", 14209 pipe_name(pipe)); 14210 } 14211 } 14212 } 14213 14214 static void 14215 verify_crtc_state(struct intel_crtc *crtc, 14216 struct intel_crtc_state *old_crtc_state, 14217 struct intel_crtc_state *new_crtc_state) 14218 { 14219 struct drm_device *dev = crtc->base.dev; 14220 struct drm_i915_private *dev_priv = to_i915(dev); 14221 struct intel_encoder *encoder; 14222 struct intel_crtc_state *pipe_config = old_crtc_state; 14223 struct drm_atomic_state *state = old_crtc_state->uapi.state; 14224 bool active; 14225 14226 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); 14227 intel_crtc_free_hw_state(old_crtc_state); 14228 intel_crtc_state_reset(old_crtc_state, crtc); 14229 old_crtc_state->uapi.state = state; 14230 14231 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, 14232 crtc->base.name); 14233 14234 active = dev_priv->display.get_pipe_config(crtc, pipe_config); 14235 14236 /* we keep both pipes enabled on 830 */ 14237 if (IS_I830(dev_priv)) 14238 active = new_crtc_state->hw.active; 14239 14240 I915_STATE_WARN(new_crtc_state->hw.active != active, 14241 "crtc active state doesn't match with hw state " 14242 "(expected %i, found %i)\n", 14243 new_crtc_state->hw.active, active); 14244 14245 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active, 14246 "transitional active state does not match atomic hw state " 14247 "(expected %i, found %i)\n", 14248 new_crtc_state->hw.active, crtc->active); 14249 14250 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 14251 enum pipe pipe; 14252 14253 active = encoder->get_hw_state(encoder, &pipe); 14254 I915_STATE_WARN(active != new_crtc_state->hw.active, 14255 "[ENCODER:%i] active %i with crtc active %i\n", 14256 encoder->base.base.id, active, 14257 new_crtc_state->hw.active); 14258 14259 
I915_STATE_WARN(active && crtc->pipe != pipe, 14260 "Encoder connected to wrong pipe %c\n", 14261 pipe_name(pipe)); 14262 14263 if (active) 14264 encoder->get_config(encoder, pipe_config); 14265 } 14266 14267 intel_crtc_compute_pixel_rate(pipe_config); 14268 14269 if (!new_crtc_state->hw.active) 14270 return; 14271 14272 intel_pipe_config_sanity_check(dev_priv, pipe_config); 14273 14274 if (!intel_pipe_config_compare(new_crtc_state, 14275 pipe_config, false)) { 14276 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 14277 intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); 14278 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]"); 14279 } 14280 } 14281 14282 static void 14283 intel_verify_planes(struct intel_atomic_state *state) 14284 { 14285 struct intel_plane *plane; 14286 const struct intel_plane_state *plane_state; 14287 int i; 14288 14289 for_each_new_intel_plane_in_state(state, plane, 14290 plane_state, i) 14291 assert_plane(plane, plane_state->planar_slave || 14292 plane_state->uapi.visible); 14293 } 14294 14295 static void 14296 verify_single_dpll_state(struct drm_i915_private *dev_priv, 14297 struct intel_shared_dpll *pll, 14298 struct intel_crtc *crtc, 14299 struct intel_crtc_state *new_crtc_state) 14300 { 14301 struct intel_dpll_hw_state dpll_hw_state; 14302 unsigned int crtc_mask; 14303 bool active; 14304 14305 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 14306 14307 drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name); 14308 14309 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state); 14310 14311 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { 14312 I915_STATE_WARN(!pll->on && pll->active_mask, 14313 "pll in active use but not on in sw tracking\n"); 14314 I915_STATE_WARN(pll->on && !pll->active_mask, 14315 "pll is on but not used by any active crtc\n"); 14316 I915_STATE_WARN(pll->on != active, 14317 "pll on state mismatch (expected %i, found %i)\n", 14318 pll->on, active); 14319 } 14320 14321 if (!crtc) { 14322 
I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, 14323 "more active pll users than references: %x vs %x\n", 14324 pll->active_mask, pll->state.crtc_mask); 14325 14326 return; 14327 } 14328 14329 crtc_mask = drm_crtc_mask(&crtc->base); 14330 14331 if (new_crtc_state->hw.active) 14332 I915_STATE_WARN(!(pll->active_mask & crtc_mask), 14333 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", 14334 pipe_name(crtc->pipe), pll->active_mask); 14335 else 14336 I915_STATE_WARN(pll->active_mask & crtc_mask, 14337 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", 14338 pipe_name(crtc->pipe), pll->active_mask); 14339 14340 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), 14341 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", 14342 crtc_mask, pll->state.crtc_mask); 14343 14344 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 14345 &dpll_hw_state, 14346 sizeof(dpll_hw_state)), 14347 "pll hw state mismatch\n"); 14348 } 14349 14350 static void 14351 verify_shared_dpll_state(struct intel_crtc *crtc, 14352 struct intel_crtc_state *old_crtc_state, 14353 struct intel_crtc_state *new_crtc_state) 14354 { 14355 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14356 14357 if (new_crtc_state->shared_dpll) 14358 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state); 14359 14360 if (old_crtc_state->shared_dpll && 14361 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { 14362 unsigned int crtc_mask = drm_crtc_mask(&crtc->base); 14363 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; 14364 14365 I915_STATE_WARN(pll->active_mask & crtc_mask, 14366 "pll active mismatch (didn't expect pipe %c in active mask)\n", 14367 pipe_name(crtc->pipe)); 14368 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, 14369 "pll enabled crtcs mismatch (found %x in enabled mask)\n", 14370 pipe_name(crtc->pipe)); 14371 } 14372 } 14373 14374 static void 14375 intel_modeset_verify_crtc(struct 
intel_crtc *crtc, 14376 struct intel_atomic_state *state, 14377 struct intel_crtc_state *old_crtc_state, 14378 struct intel_crtc_state *new_crtc_state) 14379 { 14380 if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) 14381 return; 14382 14383 verify_wm_state(crtc, new_crtc_state); 14384 verify_connector_state(state, crtc); 14385 verify_crtc_state(crtc, old_crtc_state, new_crtc_state); 14386 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state); 14387 } 14388 14389 static void 14390 verify_disabled_dpll_state(struct drm_i915_private *dev_priv) 14391 { 14392 int i; 14393 14394 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) 14395 verify_single_dpll_state(dev_priv, 14396 &dev_priv->dpll.shared_dplls[i], 14397 NULL, NULL); 14398 } 14399 14400 static void 14401 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, 14402 struct intel_atomic_state *state) 14403 { 14404 verify_encoder_state(dev_priv, state); 14405 verify_connector_state(state, NULL); 14406 verify_disabled_dpll_state(dev_priv); 14407 } 14408 14409 static void 14410 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) 14411 { 14412 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 14413 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14414 const struct drm_display_mode *adjusted_mode = 14415 &crtc_state->hw.adjusted_mode; 14416 14417 drm_calc_timestamping_constants(&crtc->base, adjusted_mode); 14418 14419 /* 14420 * The scanline counter increments at the leading edge of hsync. 14421 * 14422 * On most platforms it starts counting from vtotal-1 on the 14423 * first active line. That means the scanline counter value is 14424 * always one less than what we would expect. Ie. just after 14425 * start of vblank, which also occurs at start of hsync (on the 14426 * last active line), the scanline counter will read vblank_start-1. 
14427 * 14428 * On gen2 the scanline counter starts counting from 1 instead 14429 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 14430 * to keep the value positive), instead of adding one. 14431 * 14432 * On HSW+ the behaviour of the scanline counter depends on the output 14433 * type. For DP ports it behaves like most other platforms, but on HDMI 14434 * there's an extra 1 line difference. So we need to add two instead of 14435 * one to the value. 14436 * 14437 * On VLV/CHV DSI the scanline counter would appear to increment 14438 * approx. 1/3 of a scanline before start of vblank. Unfortunately 14439 * that means we can't tell whether we're in vblank or not while 14440 * we're on that particular line. We must still set scanline_offset 14441 * to 1 so that the vblank timestamps come out correct when we query 14442 * the scanline counter from within the vblank interrupt handler. 14443 * However if queried just before the start of vblank we'll get an 14444 * answer that's slightly in the future. 
14445 */ 14446 if (IS_GEN(dev_priv, 2)) { 14447 int vtotal; 14448 14449 vtotal = adjusted_mode->crtc_vtotal; 14450 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 14451 vtotal /= 2; 14452 14453 crtc->scanline_offset = vtotal - 1; 14454 } else if (HAS_DDI(dev_priv) && 14455 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 14456 crtc->scanline_offset = 2; 14457 } else { 14458 crtc->scanline_offset = 1; 14459 } 14460 } 14461 14462 static void intel_modeset_clear_plls(struct intel_atomic_state *state) 14463 { 14464 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14465 struct intel_crtc_state *new_crtc_state; 14466 struct intel_crtc *crtc; 14467 int i; 14468 14469 if (!dev_priv->display.crtc_compute_clock) 14470 return; 14471 14472 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14473 if (!needs_modeset(new_crtc_state)) 14474 continue; 14475 14476 intel_release_shared_dplls(state, crtc); 14477 } 14478 } 14479 14480 /* 14481 * This implements the workaround described in the "notes" section of the mode 14482 * set sequence documentation. When going from no pipes or single pipe to 14483 * multiple pipes, and planes are enabled after the pipe, we need to wait at 14484 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 
14485 */ 14486 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) 14487 { 14488 struct intel_crtc_state *crtc_state; 14489 struct intel_crtc *crtc; 14490 struct intel_crtc_state *first_crtc_state = NULL; 14491 struct intel_crtc_state *other_crtc_state = NULL; 14492 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 14493 int i; 14494 14495 /* look at all crtc's that are going to be enabled in during modeset */ 14496 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 14497 if (!crtc_state->hw.active || 14498 !needs_modeset(crtc_state)) 14499 continue; 14500 14501 if (first_crtc_state) { 14502 other_crtc_state = crtc_state; 14503 break; 14504 } else { 14505 first_crtc_state = crtc_state; 14506 first_pipe = crtc->pipe; 14507 } 14508 } 14509 14510 /* No workaround needed? */ 14511 if (!first_crtc_state) 14512 return 0; 14513 14514 /* w/a possibly needed, check how many crtc's are already enabled. */ 14515 for_each_intel_crtc(state->base.dev, crtc) { 14516 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 14517 if (IS_ERR(crtc_state)) 14518 return PTR_ERR(crtc_state); 14519 14520 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 14521 14522 if (!crtc_state->hw.active || 14523 needs_modeset(crtc_state)) 14524 continue; 14525 14526 /* 2 or more enabled crtcs means no need for w/a */ 14527 if (enabled_pipe != INVALID_PIPE) 14528 return 0; 14529 14530 enabled_pipe = crtc->pipe; 14531 } 14532 14533 if (enabled_pipe != INVALID_PIPE) 14534 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 14535 else if (other_crtc_state) 14536 other_crtc_state->hsw_workaround_pipe = first_pipe; 14537 14538 return 0; 14539 } 14540 14541 u8 intel_calc_active_pipes(struct intel_atomic_state *state, 14542 u8 active_pipes) 14543 { 14544 const struct intel_crtc_state *crtc_state; 14545 struct intel_crtc *crtc; 14546 int i; 14547 14548 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 14549 if (crtc_state->hw.active) 14550 
active_pipes |= BIT(crtc->pipe); 14551 else 14552 active_pipes &= ~BIT(crtc->pipe); 14553 } 14554 14555 return active_pipes; 14556 } 14557 14558 static int intel_modeset_checks(struct intel_atomic_state *state) 14559 { 14560 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 14561 int ret; 14562 14563 state->modeset = true; 14564 state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes); 14565 14566 state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes; 14567 14568 if (state->active_pipe_changes) { 14569 ret = _intel_atomic_lock_global_state(state); 14570 if (ret) 14571 return ret; 14572 } 14573 14574 ret = intel_modeset_calc_cdclk(state); 14575 if (ret) 14576 return ret; 14577 14578 intel_modeset_clear_plls(state); 14579 14580 if (IS_HASWELL(dev_priv)) 14581 return hsw_mode_set_planes_workaround(state); 14582 14583 return 0; 14584 } 14585 14586 /* 14587 * Handle calculation of various watermark data at the end of the atomic check 14588 * phase. The code here should be run after the per-crtc and per-plane 'check' 14589 * handlers to ensure that all derived state has been updated. 14590 */ 14591 static int calc_watermark_data(struct intel_atomic_state *state) 14592 { 14593 struct drm_device *dev = state->base.dev; 14594 struct drm_i915_private *dev_priv = to_i915(dev); 14595 14596 /* Is there platform-specific watermark information to calculate? 
	 */
	if (dev_priv->display.compute_global_watermarks)
		return dev_priv->display.compute_global_watermarks(state);

	return 0;
}

/*
 * If the new state matches the current state closely enough (fuzzy
 * compare), downgrade the pending full modeset to a fastset.
 */
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
		return;

	new_crtc_state->uapi.mode_changed = false;
	new_crtc_state->update_pipe = true;
}

/* Carry over state that must not be reprogrammed during a fastset. */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}

/* Add the planes in @plane_ids_mask on @crtc to the atomic state. */
static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
					  struct intel_crtc *crtc,
					  u8 plane_ids_mask)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;

		if ((plane_ids_mask & BIT(plane->id)) == 0)
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
{
	/* See {hsw,vlv,ivb}_plane_ratio() */
	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		IS_IVYBRIDGE(dev_priv);
}

/*
 * Per-plane atomic checks plus the min-cdclk computation; sets
 * *need_cdclk_calc when a plane's requirement forces a cdclk recompute.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
				     bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane does not count for the plane ratio. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	return 0;
}

/* Run the per-crtc atomic checks for every crtc in the state. */
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret = intel_crtc_atomic_check(state, crtc);
		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
		if (ret) {
			drm_dbg_atomic(&i915->drm,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.base.id, crtc->base.name);
			return ret;
		}
	}

	return 0;
}

/* Does any enabled crtc using one of @transcoders need a full modeset? */
static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @_state: state to validate
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_cdclk_state *new_cdclk_state;
	struct intel_crtc *crtc;
	int ret, i;
	bool any_ms = false;

	/* Catch I915_MODE_FLAG_INHERITED */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state,
i) { 14780 if (new_crtc_state->uapi.mode.private_flags != 14781 old_crtc_state->uapi.mode.private_flags) 14782 new_crtc_state->uapi.mode_changed = true; 14783 } 14784 14785 ret = drm_atomic_helper_check_modeset(dev, &state->base); 14786 if (ret) 14787 goto fail; 14788 14789 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14790 new_crtc_state, i) { 14791 if (!needs_modeset(new_crtc_state)) { 14792 /* Light copy */ 14793 intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state); 14794 14795 continue; 14796 } 14797 14798 ret = intel_crtc_prepare_cleared_state(new_crtc_state); 14799 if (ret) 14800 goto fail; 14801 14802 if (!new_crtc_state->hw.enable) 14803 continue; 14804 14805 ret = intel_modeset_pipe_config(new_crtc_state); 14806 if (ret) 14807 goto fail; 14808 } 14809 14810 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14811 new_crtc_state, i) { 14812 if (!needs_modeset(new_crtc_state)) 14813 continue; 14814 14815 ret = intel_modeset_pipe_config_late(new_crtc_state); 14816 if (ret) 14817 goto fail; 14818 14819 intel_crtc_check_fastset(old_crtc_state, new_crtc_state); 14820 } 14821 14822 /** 14823 * Check if fastset is allowed by external dependencies like other 14824 * pipes and transcoders. 14825 * 14826 * Right now it only forces a fullmodeset when the MST master 14827 * transcoder did not changed but the pipe of the master transcoder 14828 * needs a fullmodeset so all slaves also needs to do a fullmodeset or 14829 * in case of port synced crtcs, if one of the synced crtcs 14830 * needs a full modeset, all other synced crtcs should be 14831 * forced a full modeset. 
14832 */ 14833 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14834 if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state)) 14835 continue; 14836 14837 if (intel_dp_mst_is_slave_trans(new_crtc_state)) { 14838 enum transcoder master = new_crtc_state->mst_master_transcoder; 14839 14840 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) { 14841 new_crtc_state->uapi.mode_changed = true; 14842 new_crtc_state->update_pipe = false; 14843 } 14844 } 14845 14846 if (is_trans_port_sync_mode(new_crtc_state)) { 14847 u8 trans = new_crtc_state->sync_mode_slaves_mask; 14848 14849 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) 14850 trans |= BIT(new_crtc_state->master_transcoder); 14851 14852 if (intel_cpu_transcoders_need_modeset(state, trans)) { 14853 new_crtc_state->uapi.mode_changed = true; 14854 new_crtc_state->update_pipe = false; 14855 } 14856 } 14857 } 14858 14859 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14860 new_crtc_state, i) { 14861 if (needs_modeset(new_crtc_state)) { 14862 any_ms = true; 14863 continue; 14864 } 14865 14866 if (!new_crtc_state->update_pipe) 14867 continue; 14868 14869 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state); 14870 } 14871 14872 if (any_ms && !check_digital_port_conflicts(state)) { 14873 drm_dbg_kms(&dev_priv->drm, 14874 "rejecting conflicting digital port configuration\n"); 14875 ret = EINVAL; 14876 goto fail; 14877 } 14878 14879 ret = drm_dp_mst_atomic_check(&state->base); 14880 if (ret) 14881 goto fail; 14882 14883 ret = intel_atomic_check_planes(state, &any_ms); 14884 if (ret) 14885 goto fail; 14886 14887 new_cdclk_state = intel_atomic_get_new_cdclk_state(state); 14888 if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed) 14889 any_ms = true; 14890 14891 /* 14892 * distrust_bios_wm will force a full dbuf recomputation 14893 * but the hardware state will only get updated accordingly 14894 * if state->modeset==true. 
Hence distrust_bios_wm==true && 14895 * state->modeset==false is an invalid combination which 14896 * would cause the hardware and software dbuf state to get 14897 * out of sync. We must prevent that. 14898 * 14899 * FIXME clean up this mess and introduce better 14900 * state tracking for dbuf. 14901 */ 14902 if (dev_priv->wm.distrust_bios_wm) 14903 any_ms = true; 14904 14905 if (any_ms) { 14906 ret = intel_modeset_checks(state); 14907 if (ret) 14908 goto fail; 14909 } 14910 14911 ret = intel_atomic_check_crtcs(state); 14912 if (ret) 14913 goto fail; 14914 14915 intel_fbc_choose_crtc(dev_priv, state); 14916 ret = calc_watermark_data(state); 14917 if (ret) 14918 goto fail; 14919 14920 ret = intel_bw_atomic_check(state); 14921 if (ret) 14922 goto fail; 14923 14924 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14925 new_crtc_state, i) { 14926 if (!needs_modeset(new_crtc_state) && 14927 !new_crtc_state->update_pipe) 14928 continue; 14929 14930 intel_dump_pipe_config(new_crtc_state, state, 14931 needs_modeset(new_crtc_state) ? 14932 "[modeset]" : "[fastset]"); 14933 } 14934 14935 return 0; 14936 14937 fail: 14938 if (ret == -EDEADLK) 14939 return ret; 14940 14941 /* 14942 * FIXME would probably be nice to know which crtc specifically 14943 * caused the failure, in cases where we can pinpoint it. 
14944 */ 14945 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14946 new_crtc_state, i) 14947 intel_dump_pipe_config(new_crtc_state, state, "[failed]"); 14948 14949 return ret; 14950 } 14951 14952 static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 14953 { 14954 return drm_atomic_helper_prepare_planes(state->base.dev, 14955 &state->base); 14956 } 14957 14958 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 14959 { 14960 struct drm_device *dev = crtc->base.dev; 14961 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)]; 14962 14963 if (!vblank->max_vblank_count) 14964 return (u32)drm_crtc_accurate_vblank_count(&crtc->base); 14965 14966 return crtc->base.funcs->get_vblank_counter(&crtc->base); 14967 } 14968 14969 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 14970 struct intel_crtc_state *crtc_state) 14971 { 14972 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14973 14974 if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes) 14975 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 14976 14977 if (crtc_state->has_pch_encoder) { 14978 enum pipe pch_transcoder = 14979 intel_crtc_pch_transcoder(crtc); 14980 14981 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 14982 } 14983 } 14984 14985 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 14986 const struct intel_crtc_state *new_crtc_state) 14987 { 14988 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 14989 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14990 14991 /* 14992 * Update pipe size and adjust fitter if needed: the reason for this is 14993 * that in compute_mode_changes we check the native mode (not the pfit 14994 * mode) to see if we can flip rather than do a full mode set. 
In the 14995 * fastboot case, we'll flip, but if we don't update the pipesrc and 14996 * pfit state, we'll end up with a big fb scanned out into the wrong 14997 * sized surface. 14998 */ 14999 intel_set_pipe_src_size(new_crtc_state); 15000 15001 /* on skylake this is done by detaching scalers */ 15002 if (INTEL_GEN(dev_priv) >= 9) { 15003 skl_detach_scalers(new_crtc_state); 15004 15005 if (new_crtc_state->pch_pfit.enabled) 15006 skl_pfit_enable(new_crtc_state); 15007 } else if (HAS_PCH_SPLIT(dev_priv)) { 15008 if (new_crtc_state->pch_pfit.enabled) 15009 ilk_pfit_enable(new_crtc_state); 15010 else if (old_crtc_state->pch_pfit.enabled) 15011 ilk_pfit_disable(old_crtc_state); 15012 } 15013 15014 /* 15015 * The register is supposedly single buffered so perhaps 15016 * not 100% correct to do this here. But SKL+ calculate 15017 * this based on the adjust pixel rate so pfit changes do 15018 * affect it and so it must be updated for fastsets. 15019 * HSW/BDW only really need this here for fastboot, after 15020 * that the value should not change without a full modeset. 15021 */ 15022 if (INTEL_GEN(dev_priv) >= 9 || 15023 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 15024 hsw_set_linetime_wm(new_crtc_state); 15025 15026 if (INTEL_GEN(dev_priv) >= 11) 15027 icl_set_pipe_chicken(crtc); 15028 } 15029 15030 static void commit_pipe_config(struct intel_atomic_state *state, 15031 struct intel_crtc *crtc) 15032 { 15033 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15034 const struct intel_crtc_state *old_crtc_state = 15035 intel_atomic_get_old_crtc_state(state, crtc); 15036 const struct intel_crtc_state *new_crtc_state = 15037 intel_atomic_get_new_crtc_state(state, crtc); 15038 bool modeset = needs_modeset(new_crtc_state); 15039 15040 /* 15041 * During modesets pipe configuration was programmed as the 15042 * CRTC was enabled. 
15043 */ 15044 if (!modeset) { 15045 if (new_crtc_state->uapi.color_mgmt_changed || 15046 new_crtc_state->update_pipe) 15047 intel_color_commit(new_crtc_state); 15048 15049 if (INTEL_GEN(dev_priv) >= 9) 15050 skl_detach_scalers(new_crtc_state); 15051 15052 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 15053 bdw_set_pipemisc(new_crtc_state); 15054 15055 if (new_crtc_state->update_pipe) 15056 intel_pipe_fastset(old_crtc_state, new_crtc_state); 15057 } 15058 15059 if (dev_priv->display.atomic_update_watermarks) 15060 dev_priv->display.atomic_update_watermarks(state, crtc); 15061 } 15062 15063 static void intel_enable_crtc(struct intel_atomic_state *state, 15064 struct intel_crtc *crtc) 15065 { 15066 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15067 const struct intel_crtc_state *new_crtc_state = 15068 intel_atomic_get_new_crtc_state(state, crtc); 15069 15070 if (!needs_modeset(new_crtc_state)) 15071 return; 15072 15073 intel_crtc_update_active_timings(new_crtc_state); 15074 15075 dev_priv->display.crtc_enable(state, crtc); 15076 15077 /* vblanks work again, re-enable pipe CRC. 
*/ 15078 intel_crtc_enable_pipe_crc(crtc); 15079 } 15080 15081 static void intel_update_crtc(struct intel_atomic_state *state, 15082 struct intel_crtc *crtc) 15083 { 15084 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15085 const struct intel_crtc_state *old_crtc_state = 15086 intel_atomic_get_old_crtc_state(state, crtc); 15087 struct intel_crtc_state *new_crtc_state = 15088 intel_atomic_get_new_crtc_state(state, crtc); 15089 bool modeset = needs_modeset(new_crtc_state); 15090 15091 if (!modeset) { 15092 if (new_crtc_state->preload_luts && 15093 (new_crtc_state->uapi.color_mgmt_changed || 15094 new_crtc_state->update_pipe)) 15095 intel_color_load_luts(new_crtc_state); 15096 15097 intel_pre_plane_update(state, crtc); 15098 15099 if (new_crtc_state->update_pipe) 15100 intel_encoders_update_pipe(state, crtc); 15101 } 15102 15103 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) 15104 intel_fbc_disable(crtc); 15105 else 15106 intel_fbc_enable(state, crtc); 15107 15108 /* Perform vblank evasion around commit operation */ 15109 intel_pipe_update_start(new_crtc_state); 15110 15111 commit_pipe_config(state, crtc); 15112 15113 if (INTEL_GEN(dev_priv) >= 9) 15114 skl_update_planes_on_crtc(state, crtc); 15115 else 15116 i9xx_update_planes_on_crtc(state, crtc); 15117 15118 intel_pipe_update_end(new_crtc_state); 15119 15120 /* 15121 * We usually enable FIFO underrun interrupts as part of the 15122 * CRTC enable sequence during modesets. But when we inherit a 15123 * valid pipe configuration from the BIOS we need to take care 15124 * of enabling them on the CRTC's first fastset. 
15125 */ 15126 if (new_crtc_state->update_pipe && !modeset && 15127 old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) 15128 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 15129 } 15130 15131 15132 static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 15133 struct intel_crtc_state *old_crtc_state, 15134 struct intel_crtc_state *new_crtc_state, 15135 struct intel_crtc *crtc) 15136 { 15137 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15138 15139 intel_crtc_disable_planes(state, crtc); 15140 15141 /* 15142 * We need to disable pipe CRC before disabling the pipe, 15143 * or we race against vblank off. 15144 */ 15145 intel_crtc_disable_pipe_crc(crtc); 15146 15147 dev_priv->display.crtc_disable(state, crtc); 15148 crtc->active = false; 15149 intel_fbc_disable(crtc); 15150 intel_disable_shared_dpll(old_crtc_state); 15151 15152 /* FIXME unify this for all platforms */ 15153 if (!new_crtc_state->hw.active && 15154 !HAS_GMCH(dev_priv) && 15155 dev_priv->display.initial_watermarks) 15156 dev_priv->display.initial_watermarks(state, crtc); 15157 } 15158 15159 static void intel_commit_modeset_disables(struct intel_atomic_state *state) 15160 { 15161 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 15162 struct intel_crtc *crtc; 15163 u32 handled = 0; 15164 int i; 15165 15166 /* Only disable port sync and MST slaves */ 15167 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15168 new_crtc_state, i) { 15169 if (!needs_modeset(new_crtc_state)) 15170 continue; 15171 15172 if (!old_crtc_state->hw.active) 15173 continue; 15174 15175 /* In case of Transcoder port Sync master slave CRTCs can be 15176 * assigned in any order and we need to make sure that 15177 * slave CRTCs are disabled first and then master CRTC since 15178 * Slave vblanks are masked till Master Vblanks. 
15179 */ 15180 if (!is_trans_port_sync_slave(old_crtc_state) && 15181 !intel_dp_mst_is_slave_trans(old_crtc_state)) 15182 continue; 15183 15184 intel_pre_plane_update(state, crtc); 15185 intel_old_crtc_state_disables(state, old_crtc_state, 15186 new_crtc_state, crtc); 15187 handled |= BIT(crtc->pipe); 15188 } 15189 15190 /* Disable everything else left on */ 15191 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15192 new_crtc_state, i) { 15193 if (!needs_modeset(new_crtc_state) || 15194 (handled & BIT(crtc->pipe))) 15195 continue; 15196 15197 intel_pre_plane_update(state, crtc); 15198 if (old_crtc_state->hw.active) 15199 intel_old_crtc_state_disables(state, old_crtc_state, 15200 new_crtc_state, crtc); 15201 } 15202 } 15203 15204 static void intel_commit_modeset_enables(struct intel_atomic_state *state) 15205 { 15206 struct intel_crtc_state *new_crtc_state; 15207 struct intel_crtc *crtc; 15208 int i; 15209 15210 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 15211 if (!new_crtc_state->hw.active) 15212 continue; 15213 15214 intel_enable_crtc(state, crtc); 15215 intel_update_crtc(state, crtc); 15216 } 15217 } 15218 15219 static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state) 15220 { 15221 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15222 u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask; 15223 u8 required_slices = state->enabled_dbuf_slices_mask; 15224 u8 slices_union = hw_enabled_slices | required_slices; 15225 15226 /* If 2nd DBuf slice required, enable it here */ 15227 if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices) 15228 icl_dbuf_slices_update(dev_priv, slices_union); 15229 } 15230 15231 static void icl_dbuf_slice_post_update(struct intel_atomic_state *state) 15232 { 15233 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 15234 u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask; 15235 u8 required_slices = state->enabled_dbuf_slices_mask; 15236 
	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}

/*
 * skl+ enable path: updates/enables pipes in an order that guarantees
 * their DDB allocations never transiently overlap.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* Classify active pipes: fastset-only vs. full modeset. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Defer this pipe while its new ddb still overlaps others. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	/* Modeset pipes still need their plane updates after enabling. */
	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe must have been handled by one of the passes above. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}

/* Drop the references of all atomic states queued for deferred freeing. */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}

static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}

/*
 * Wait for the commit fence to signal, but also wake up if a GPU reset
 * requiring a modeset is flagged, so the reset path is not blocked.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait,
		    &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}

/*
 * Deferred cleanup of a committed state: release plane resources, signal
 * cleanup_done to the drm core and drop the final state reference.
 * Queued on system_highpri_wq at the end of intel_atomic_commit_tail().
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);
	struct drm_i915_private *i915 = to_i915(state->dev);

	drm_atomic_helper_cleanup_planes(&i915->drm, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);

	/* Also drain any states queued for freeing while we're here */
	intel_atomic_helper_free_state(i915);
}

/*
 * The main commit path: applies a swapped-in atomic state to the hardware.
 * Runs either inline (blocking commits) or from a workqueue (nonblocking
 * commits, see intel_atomic_commit()). The ordering of the steps below is
 * load-bearing; see the individual comments.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	/* Wait for all plane fences (or a pending GPU reset) first */
	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			/*
			 * NOTE(review): domains are stored at index crtc->pipe
			 * here but released via put_domains[i] below; this is
			 * only consistent if the state's crtc index always
			 * equals the pipe — verify.
			 */
			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now-disabled pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	/* Enable any new DBuf slices we might need */
	if (state->modeset)
		icl_dbuf_slice_pre_update(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		intel_set_cdclk_post_plane_update(state);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.active &&
		    !needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	/* Disable any DBuf slices we no longer need */
	if (state->modeset)
		icl_dbuf_slice_post_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline.
	 * For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}

/* Work callback for nonblocking commits: just run the commit tail. */
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);

	intel_atomic_commit_tail(state);
}

/*
 * i915_sw_fence notify callback for the commit_ready fence. On FENCE_FREE
 * the state is pushed onto a lock-free list and freed from a worker, since
 * this callback may run from a context where we can't drop the reference
 * directly.
 */
static int __i915_sw_fence_call
intel_atomic_commit_ready(struct i915_sw_fence *fence,
			  enum i915_sw_fence_notify notify)
{
	struct intel_atomic_state *state =
		container_of(fence, struct intel_atomic_state, commit_ready);

	switch (notify) {
	case FENCE_COMPLETE:
		/* we do blocking waits in the worker, nothing to do here */
		break;
	case FENCE_FREE:
		{
			struct intel_atomic_helper *helper =
				&to_i915(state->base.dev)->atomic_helper;

			/* Only schedule the worker when the list was empty */
			if (llist_add(&state->freed, &helper->free_list))
				schedule_work(&helper->free_work);
			break;
		}
	}

	return NOTIFY_DONE;
}

/*
 * Move frontbuffer tracking bits from each plane's old fb to its new fb
 * for every plane in the state.
 */
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}

/*
 * Sanity check that every crtc's modeset lock is held — required before
 * touching global (cross-pipe) state such as active_pipes.
 */
static void assert_global_state_locked(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		drm_modeset_lock_assert_held(&crtc->base.mutex);
}

/*
 * The drm .atomic_commit hook: prepare the commit, swap the state in, and
 * either run the commit tail inline (blocking) or queue it on the
 * appropriate workqueue (nonblocking).
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		/* Commit the fence so waiters aren't stuck, then unwind */
		i915_sw_fence_commit(&state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	if (state->global_state_changed) {
		assert_global_state_locked(dev_priv);

		dev_priv->active_pipes = state->active_pipes;
	}

	/* Extra reference for the commit tail; dropped in cleanup work */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking commit: don't overtake queued modesets */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}

/* Waitqueue entry used to RPS-boost a request after a missed vblank. */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;
	struct i915_request *request;
};

/*
 * Vblank waitqueue callback: boost the GPU frequency for the tracked
 * request if it hasn't started running by the time the vblank fires,
 * then drop our references and free the entry. Returns 1 (entry handled).
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		intel_rps_boost(rq);
	i915_request_put(rq);

	drm_crtc_vblank_put(wait->crtc);

	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}

/*
 * Arm an RPS boost to fire at the crtc's next vblank, keyed to the given
 * fence's request. No-op for foreign fences, gen < 6, or if the vblank
 * reference can't be taken; allocation failure is silently ignored
 * (the boost is best-effort).
 */
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
				       struct dma_fence *fence)
{
	struct wait_rps_boost *wait;

	if (!dma_fence_is_i915(fence))
		return;

	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
		return;

	if (drm_crtc_vblank_get(crtc))
		return;

	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		drm_crtc_vblank_put(crtc);
		return;
	}

	wait->request = to_request(dma_fence_get(fence));
	wait->crtc = crtc;

	wait->wait.func = do_rps_boost;
	wait->wait.flags = 0;

	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}

/*
 * Pin (and fence, where applicable) the plane's framebuffer for scanout.
 * Cursor planes on platforms that need a physical address are attached to
 * a phys object first. On success the resulting vma is stored in
 * plane_state->vma; returns 0 or a negative error code.
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct i915_vma *vma;

	if (plane->id == PLANE_CURSOR &&
	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
		const int align = intel_cursor_alignment(dev_priv);
		int err;

		err = i915_gem_object_attach_phys(obj, align);
		if (err)
			return err;
	}

	vma = intel_pin_and_fence_fb_obj(fb,
					 &plane_state->view,
					 intel_plane_uses_fence(plane_state),
					 &plane_state->flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	plane_state->vma = vma;

	return 0;
}

/*
 * Release the vma pinned by intel_plane_pin_fb(), if any. Clears
 * old_plane_state->vma so the unpin only ever happens once.
 */
static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
	struct i915_vma *vma;

	vma = fetch_and_zero(&old_plane_state->vma);
	if (vma)
		intel_unpin_fb_vma(vma, old_plane_state->flags);
}

/* Bump the scheduling priority of rendering into a scanout object. */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @_plane: drm plane to prepare for
 * @_new_plane_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *_plane,
		       struct drm_plane_state *_new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_plane_state *new_plane_state =
		to_intel_plane_state(_new_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
	int ret;

	if (old_obj) {
		const struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state,
							to_intel_crtc(old_plane_state->hw.crtc));

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&state->commit_ready,
							      old_obj->base.resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_plane_state->uapi.fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
						    new_plane_state->uapi.fence,
						    i915_fence_timeout(dev_priv),
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	/* Hold the pages across the pin so they can't be swapped out */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = intel_plane_pin_fb(new_plane_state);

	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	fb_obj_bump_render_priority(obj);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);

	if (!new_plane_state->uapi.fence) { /* implicit fencing */
		struct dma_fence *fence;

		ret = i915_sw_fence_await_reservation(&state->commit_ready,
						      obj->base.resv, NULL,
						      false,
						      i915_fence_timeout(dev_priv),
						      GFP_KERNEL);
		if (ret < 0)
			goto unpin_fb;

		fence = dma_resv_get_excl_rcu(obj->base.resv);
		if (fence) {
			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
						   fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
					   new_plane_state->uapi.fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
		state->rps_interactive = true;
	}

	return 0;

unpin_fb:
	intel_plane_unpin_fb(new_plane_state);

	return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @_old_plane_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *_old_plane_state)
{
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(_old_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(old_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);

	if (!obj)
		return;

	/* Undo the interactive-RPS bias taken in intel_prepare_plane_fb() */
	if (state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
		state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	intel_plane_unpin_fb(old_plane_state);
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

/*
 * .format_mod_supported for gen <= 3 primary planes: linear or X-tiled
 * only, with the small i8xx format list.
 */
static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
					    u32 format, u64 modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		return false;
	}

	switch (format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_XRGB8888:
		/*
		 * NOTE(review): the modifier was already filtered by the
		 * switch above, so this condition is always true here.
		 */
		return modifier == DRM_FORMAT_MOD_LINEAR ||
			modifier == I915_FORMAT_MOD_X_TILED;
	default:
		return false;
	}
}

/*
 * .format_mod_supported for gen4+ (pre-gen9) primary planes: linear or
 * X-tiled only, with the wider i965 format list.
 */
static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
					    u32 format, u64 modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		return false;
	}

	switch (format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_XBGR16161616F:
		/*
		 * NOTE(review): the modifier was already filtered by the
		 * switch above, so this condition is always true here.
		 */
		return modifier == DRM_FORMAT_MOD_LINEAR ||
			modifier == I915_FORMAT_MOD_X_TILED;
	default:
		return false;
	}
}

/* Cursor planes only support linear ARGB8888. */
static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
					      u32 format, u64 modifier)
{
	return modifier == DRM_FORMAT_MOD_LINEAR &&
		format == DRM_FORMAT_ARGB8888;
}

static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};

static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};

/*
 * Fast path for legacy cursor ioctls: update the cursor plane without a
 * full atomic commit (and thus without vblank waits), falling back to the
 * slowpath (drm_atomic_helper_update_plane()) whenever the update could
 * affect watermarks or a modeset/commit is in flight.
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane. This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	/* The temporary crtc state is never swapped in, always destroy it */
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	/* On failure the new plane state dies; on success the old one does */
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}

static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};

/* Whether the given (pre-gen9) plane can be used for FBC. */
static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
			       enum i9xx_plane_id i9xx_plane)
{
	if (!HAS_FBC(dev_priv))
		return false;

	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return i9xx_plane ==
			PLANE_A; /* tied to pipe A */
	else if (IS_IVYBRIDGE(dev_priv))
		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
			i9xx_plane == PLANE_C;
	else if (INTEL_GEN(dev_priv) >= 4)
		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
	else
		return i9xx_plane == PLANE_A;
}

/*
 * Allocate and register the primary plane for a pipe on pre-gen9
 * platforms (gen9+ delegates to skl_universal_plane_create()).
 * Selects the per-platform format list, plane vtable, min-cdclk hook and
 * rotation support. Returns the plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 *  output on each color channel has one quarter amplitude.
		 *  It can be brought up to full amplitude by using pipe
		 *  gamma correction or pipe color space conversion to
		 *  multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	/* Plane naming: "primary %c" where plane==pipe, "plane %c" otherwise */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* Primary planes sit at the bottom of the zpos stack */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}

static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* Cursor sits above all the sprite planes in the z order. */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}

/* vfuncs shared by all the per-platform CRTC func tables below. */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources

static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/*
 * Allocate a CRTC together with an initial CRTC state.
 * Returns the CRTC or ERR_PTR(-ENOMEM).
 */
static struct intel_crtc *intel_crtc_alloc(void)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;

	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
	if (!crtc)
		return ERR_PTR(-ENOMEM);

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(crtc);
		return ERR_PTR(-ENOMEM);
	}

	crtc->base.state = &crtc_state->uapi;
	crtc->config = crtc_state;

	return crtc;
}

/* Counterpart of intel_crtc_alloc(): free the CRTC and its state. */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}

/* Point every plane's possible_crtcs at the single CRTC of its pipe. */
static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}

/*
 * Create the CRTC for @pipe together with its primary, sprite and
 * cursor planes, and register it with DRM. Returns 0 or a negative
 * error code.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/* Pick the vblank func table matching this platform's hw. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs =
				&i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Each pipe maps to exactly one CRTC, registered exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	intel_color_init(crtc);

	intel_crtc_crc_init(crtc);

	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}

/* ioctl: translate a CRTC id into its hardware pipe for userspace. */
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

/* Mask of encoders that may be cloned onto the same CRTC as @encoder. */
static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	u32 possible_clones = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			possible_clones |= drm_encoder_mask(&source_encoder->base);
	}

	return possible_clones;
}

/* Mask of CRTCs that @encoder can drive, derived from its pipe_mask. */
static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		if (encoder->pipe_mask & BIT(crtc->pipe))
			possible_crtcs |= drm_crtc_mask(&crtc->base);
	}

	return possible_crtcs;
}

/* Is eDP on port A present and not fused off on this ILK-era platform? */
static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/* Is an integrated CRT output present on this DDI (HSW/BDW) platform? */
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

/* Unlock the panel power sequencer registers on non-DDI platforms. */
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
{
	int pps_num;
	int pps_idx;

	if (HAS_DDI(dev_priv))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_num = 2;
	else
		pps_num = 1;

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));

		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
	}
}

/* Select the PPS register base and apply the unlock workaround. */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}

/* Probe and register all output encoders for this platform. */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	if (INTEL_GEN(dev_priv) >= 12) {
		/*
		 * NOTE(review): PORT_C is not initialized in this branch —
		 * confirm this is intentional for gen12.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not
		 * present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = intel_de_read(dev_priv, SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/* Now that all encoders exist, compute their crtc/clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}

/* Framebuffer .destroy hook: drop the frontbuffer reference and free. */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct
	intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	intel_frontbuffer_put(intel_fb->frontbuffer);

	kfree(intel_fb);
}

/* Framebuffer .create_handle hook; userptr-backed objects are refused. */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	if (obj->userptr.mm) {
		drm_dbg(&i915->drm,
			"attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

/*
 * Framebuffer .dirty hook: flush pending CPU writes and the
 * frontbuffer state. The clip rects are not used.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};

/*
 * Validate @mode_cmd against hardware and tiling constraints and
 * initialize @intel_fb around @obj. On success the fb holds a
 * frontbuffer reference; on failure that reference is dropped and a
 * negative error code is returned.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot tiling/stride under the object lock. */
	i915_gem_object_lock(obj);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the tiling mode. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All color planes must come from the same GEM object. */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		if (is_gen12_ccs_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}

/* .fb_create hook: look up the GEM object and wrap it in an fb. */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	i915_gem_object_put(obj);

	return fb;
}

/* Global .mode_valid hook: reject modes the hardware can never do. */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
17221 */ 17222 17223 if (mode->vscan > 1) 17224 return MODE_NO_VSCAN; 17225 17226 if (mode->flags & DRM_MODE_FLAG_HSKEW) 17227 return MODE_H_ILLEGAL; 17228 17229 if (mode->flags & (DRM_MODE_FLAG_CSYNC | 17230 DRM_MODE_FLAG_NCSYNC | 17231 DRM_MODE_FLAG_PCSYNC)) 17232 return MODE_HSYNC; 17233 17234 if (mode->flags & (DRM_MODE_FLAG_BCAST | 17235 DRM_MODE_FLAG_PIXMUX | 17236 DRM_MODE_FLAG_CLKDIV2)) 17237 return MODE_BAD; 17238 17239 /* Transcoder timing limits */ 17240 if (INTEL_GEN(dev_priv) >= 11) { 17241 hdisplay_max = 16384; 17242 vdisplay_max = 8192; 17243 htotal_max = 16384; 17244 vtotal_max = 8192; 17245 } else if (INTEL_GEN(dev_priv) >= 9 || 17246 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 17247 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ 17248 vdisplay_max = 4096; 17249 htotal_max = 8192; 17250 vtotal_max = 8192; 17251 } else if (INTEL_GEN(dev_priv) >= 3) { 17252 hdisplay_max = 4096; 17253 vdisplay_max = 4096; 17254 htotal_max = 8192; 17255 vtotal_max = 8192; 17256 } else { 17257 hdisplay_max = 2048; 17258 vdisplay_max = 2048; 17259 htotal_max = 4096; 17260 vtotal_max = 4096; 17261 } 17262 17263 if (mode->hdisplay > hdisplay_max || 17264 mode->hsync_start > htotal_max || 17265 mode->hsync_end > htotal_max || 17266 mode->htotal > htotal_max) 17267 return MODE_H_ILLEGAL; 17268 17269 if (mode->vdisplay > vdisplay_max || 17270 mode->vsync_start > vtotal_max || 17271 mode->vsync_end > vtotal_max || 17272 mode->vtotal > vtotal_max) 17273 return MODE_V_ILLEGAL; 17274 17275 if (INTEL_GEN(dev_priv) >= 5) { 17276 if (mode->hdisplay < 64 || 17277 mode->htotal - mode->hdisplay < 32) 17278 return MODE_H_ILLEGAL; 17279 17280 if (mode->vtotal - mode->vdisplay < 5) 17281 return MODE_V_ILLEGAL; 17282 } else { 17283 if (mode->htotal - mode->hdisplay < 32) 17284 return MODE_H_ILLEGAL; 17285 17286 if (mode->vtotal - mode->vdisplay < 3) 17287 return MODE_V_ILLEGAL; 17288 } 17289 17290 return MODE_OK; 17291 } 17292 17293 enum drm_mode_status 17294 
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode)
{
	/*
	 * Reject modes larger than the maximum plane size on gen9+, so we
	 * don't advertize modes that can't be covered by a fullscreen plane.
	 * Non-static: called from connector ->mode_valid implementations.
	 */
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertize modes that are
	 * too big for that.
	 */
	if (INTEL_GEN(dev_priv) >= 11) {
		plane_width_max = 5120;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

/* Top-level KMS hooks for the whole device. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform crtc enable/disable, clock computation and
 * pipe-config readout implementations. Ordering of the branches matters:
 * more specific/newer platforms are matched first.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skl_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* HSW/BDW: DDI but pre-skl universal planes */
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ILK/SNB/IVB */
		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ilk_crtc_compute_clock;
		dev_priv->display.crtc_enable = ilk_crtc_enable;
		dev_priv->display.crtc_disable = ilk_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2 */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training only exists on PCH-split platforms (ILK-IVB) */
	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
	else
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;

}

/* Read out the current cdclk config and seed the cdclk state with it. */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(i915->cdclk.obj.state);

	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
}

/*
 * Pull every crtc and plane state into @state so the subsequent atomic
 * check computes watermarks for the whole device, not just a subset.
 * Returns 0 or a negative errno (e.g. -EDEADLK from lock contention).
 */
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_crtc *crtc;

	drm_for_each_crtc(crtc, state->dev) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	drm_for_each_plane(plane, state->dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* Standard drm_modeset deadlock-backoff dance: clear and retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements. This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform. Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

/*
 * Cache the FDI PLL frequency: read out from the hardware on ILK,
 * hardcoded on SNB/IVB, absent on everything else.
 */
static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 5)) {
		u32 fdi_pll_clk =
			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
		dev_priv->fdi_pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
}

/*
 * Commit the state inherited from the BIOS so all active planes recompute
 * their derived state once at probe time (see caller for rationale).
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * FIXME hack to force full modeset when DSC is being
			 * used.
			 *
			 * As long as we do not have full state readout and
			 * config comparison of crtc_state->dsc, we have no way
			 * to ensure reliable fastset. Remove once we have
			 * readout for DSC.
			 */
			if (crtc_state->dsc.compression_enable) {
				ret = drm_atomic_add_affected_connectors(state,
									 &crtc->base);
				if (ret)
					goto out;
				crtc_state->uapi.mode_changed = true;
				drm_dbg_kms(dev, "Force full modeset for DSC\n");
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Deadlock backoff: drop contended locks and retry from scratch. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

/* One-time drm_mode_config setup: limits, hooks, cursor/fb dimensions. */
static void intel_mode_config_init(struct drm_i915_private *i915)
{
	struct drm_mode_config *mode_config = &i915->drm.mode_config;

	drm_mode_config_init(&i915->drm);
	INIT_LIST_HEAD(&i915->global_obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->allow_fb_modifiers = true;

	mode_config->funcs = &intel_mode_funcs;

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
17662 */ 17663 if (INTEL_GEN(i915) >= 7) { 17664 mode_config->max_width = 16384; 17665 mode_config->max_height = 16384; 17666 } else if (INTEL_GEN(i915) >= 4) { 17667 mode_config->max_width = 8192; 17668 mode_config->max_height = 8192; 17669 } else if (IS_GEN(i915, 3)) { 17670 mode_config->max_width = 4096; 17671 mode_config->max_height = 4096; 17672 } else { 17673 mode_config->max_width = 2048; 17674 mode_config->max_height = 2048; 17675 } 17676 17677 if (IS_I845G(i915) || IS_I865G(i915)) { 17678 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512; 17679 mode_config->cursor_height = 1023; 17680 } else if (IS_GEN(i915, 2)) { 17681 mode_config->cursor_width = 64; 17682 mode_config->cursor_height = 64; 17683 } else { 17684 mode_config->cursor_width = 256; 17685 mode_config->cursor_height = 256; 17686 } 17687 } 17688 17689 static void intel_mode_config_cleanup(struct drm_i915_private *i915) 17690 { 17691 intel_atomic_global_obj_cleanup(i915); 17692 drm_mode_config_cleanup(&i915->drm); 17693 } 17694 17695 static void plane_config_fini(struct intel_initial_plane_config *plane_config) 17696 { 17697 if (plane_config->fb) { 17698 struct drm_framebuffer *fb = &plane_config->fb->base; 17699 17700 /* We may only have the stub and not a full framebuffer */ 17701 if (drm_framebuffer_read_refcount(fb)) 17702 drm_framebuffer_put(fb); 17703 else 17704 kfree(fb); 17705 } 17706 17707 if (plane_config->vma) 17708 i915_vma_put(plane_config->vma); 17709 } 17710 17711 /* part #1: call before irq install */ 17712 int intel_modeset_init_noirq(struct drm_i915_private *i915) 17713 { 17714 int ret; 17715 17716 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); 17717 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | 17718 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); 17719 17720 intel_mode_config_init(i915); 17721 17722 ret = intel_cdclk_init(i915); 17723 if (ret) 17724 return ret; 17725 17726 ret = intel_bw_init(i915); 17727 if (ret) 17728 return ret; 17729 17730 
	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;
}

/* part #2: call after irq install */
int intel_modeset_init(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Create one crtc per pipe, but only if the display is usable. */
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Take over the state the BIOS/GOP left the hardware in. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		/* Non-fatal on purpose: log and carry on with probe. */
		drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");

	return 0;
}

/* Force-enable a pipe with a fixed 640x480@60 mode (quirk workaround). */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check that the fixed divider values hit ~25.175 MHz. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Hardcoded 640x480 CRT timings (values are end-1 encoded). */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

/* Counterpart to i830_enable_pipe(): turn the force-quirk pipe back off. */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* All planes and cursors must already be off before the pipe goes. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

/*
 * Pre-gen4 primary planes can be attached to either pipe; disable any
 * plane the BIOS left attached to the wrong pipe.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		if (!plane->get_hw_state(plane, &pipe))
			continue;

		if (pipe == crtc->pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			    plane->base.base.id, plane->base.name);

		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}

/* True if at least one encoder is attached to this crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

/* First connector attached to @encoder, or NULL if there is none. */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}

/* Note: "trancoder" typo kept — the name is used elsewhere in this file. */
static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
			      enum pipe pch_transcoder)
{
	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
}

/* Clear any frame start delay the BIOS left programmed (we expect 0). */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register. */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	/* Also clear the delay on the PCH side of the link. */
	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}
}

/* Bring a crtc read out from the BIOS into a state the driver can manage. */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc.
LPT-H has only PCH transcoder A, 18103 * and marking underrun reporting as disabled for the non-existing 18104 * PCH transcoders B and C would prevent enabling the south 18105 * error interrupt (see cpt_can_enable_serr_int()). 18106 */ 18107 if (has_pch_trancoder(dev_priv, crtc->pipe)) 18108 crtc->pch_fifo_underrun_disabled = true; 18109 } 18110 } 18111 18112 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) 18113 { 18114 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 18115 18116 /* 18117 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram 18118 * the hardware when a high res displays plugged in. DPLL P 18119 * divider is zero, and the pipe timings are bonkers. We'll 18120 * try to disable everything in that case. 18121 * 18122 * FIXME would be nice to be able to sanitize this state 18123 * without several WARNs, but for now let's take the easy 18124 * road. 18125 */ 18126 return IS_GEN(dev_priv, 6) && 18127 crtc_state->hw.active && 18128 crtc_state->shared_dpll && 18129 crtc_state->port_clock == 0; 18130 } 18131 18132 static void intel_sanitize_encoder(struct intel_encoder *encoder) 18133 { 18134 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 18135 struct intel_connector *connector; 18136 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 18137 struct intel_crtc_state *crtc_state = crtc ? 18138 to_intel_crtc_state(crtc->base.state) : NULL; 18139 18140 /* We need to check both for a crtc link (meaning that the 18141 * encoder is active and trying to read from a pipe) and the 18142 * pipe itself being active. */ 18143 bool has_active_crtc = crtc_state && 18144 crtc_state->hw.active; 18145 18146 if (crtc_state && has_bogus_dpll_config(crtc_state)) { 18147 drm_dbg_kms(&dev_priv->drm, 18148 "BIOS has misprogrammed the hardware. 
Disabling pipe %c\n", 18149 pipe_name(crtc->pipe)); 18150 has_active_crtc = false; 18151 } 18152 18153 connector = intel_encoder_find_connector(encoder); 18154 if (connector && !has_active_crtc) { 18155 drm_dbg_kms(&dev_priv->drm, 18156 "[ENCODER:%d:%s] has active connectors but no active pipe!\n", 18157 encoder->base.base.id, 18158 encoder->base.name); 18159 18160 /* Connector is active, but has no active pipe. This is 18161 * fallout from our resume register restoring. Disable 18162 * the encoder manually again. */ 18163 if (crtc_state) { 18164 struct drm_encoder *best_encoder; 18165 18166 drm_dbg_kms(&dev_priv->drm, 18167 "[ENCODER:%d:%s] manually disabled\n", 18168 encoder->base.base.id, 18169 encoder->base.name); 18170 18171 /* avoid oopsing in case the hooks consult best_encoder */ 18172 best_encoder = connector->base.state->best_encoder; 18173 connector->base.state->best_encoder = &encoder->base; 18174 18175 /* FIXME NULL atomic state passed! */ 18176 if (encoder->disable) 18177 encoder->disable(NULL, encoder, crtc_state, 18178 connector->base.state); 18179 if (encoder->post_disable) 18180 encoder->post_disable(NULL, encoder, crtc_state, 18181 connector->base.state); 18182 18183 connector->base.state->best_encoder = best_encoder; 18184 } 18185 encoder->base.crtc = NULL; 18186 18187 /* Inconsistent output/port/pipe state happens presumably due to 18188 * a bug in one of the get_hw_state functions. Or someplace else 18189 * in our code, like the register restore mess on resume. Clamp 18190 * things to off as a safer default. 
*/ 18191 18192 connector->base.dpms = DRM_MODE_DPMS_OFF; 18193 connector->base.encoder = NULL; 18194 } 18195 18196 /* notify opregion of the sanitized encoder state */ 18197 intel_opregion_notify_encoder(encoder, connector && has_active_crtc); 18198 18199 if (INTEL_GEN(dev_priv) >= 11) 18200 icl_sanitize_encoder_pll_mapping(encoder); 18201 } 18202 18203 /* FIXME read out full plane state for all planes */ 18204 static void readout_plane_state(struct drm_i915_private *dev_priv) 18205 { 18206 struct intel_plane *plane; 18207 struct intel_crtc *crtc; 18208 18209 for_each_intel_plane(&dev_priv->drm, plane) { 18210 struct intel_plane_state *plane_state = 18211 to_intel_plane_state(plane->base.state); 18212 struct intel_crtc_state *crtc_state; 18213 enum pipe pipe = PIPE_A; 18214 bool visible; 18215 18216 visible = plane->get_hw_state(plane, &pipe); 18217 18218 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 18219 crtc_state = to_intel_crtc_state(crtc->base.state); 18220 18221 intel_set_plane_visible(crtc_state, plane_state, visible); 18222 18223 drm_dbg_kms(&dev_priv->drm, 18224 "[PLANE:%d:%s] hw state readout: %s, pipe %c\n", 18225 plane->base.base.id, plane->base.name, 18226 enableddisabled(visible), pipe_name(pipe)); 18227 } 18228 18229 for_each_intel_crtc(&dev_priv->drm, crtc) { 18230 struct intel_crtc_state *crtc_state = 18231 to_intel_crtc_state(crtc->base.state); 18232 18233 fixup_active_planes(crtc_state); 18234 } 18235 } 18236 18237 static void intel_modeset_readout_hw_state(struct drm_device *dev) 18238 { 18239 struct drm_i915_private *dev_priv = to_i915(dev); 18240 struct intel_cdclk_state *cdclk_state = 18241 to_intel_cdclk_state(dev_priv->cdclk.obj.state); 18242 enum pipe pipe; 18243 struct intel_crtc *crtc; 18244 struct intel_encoder *encoder; 18245 struct intel_connector *connector; 18246 struct drm_connector_list_iter conn_iter; 18247 u8 active_pipes = 0; 18248 18249 for_each_intel_crtc(dev, crtc) { 18250 struct intel_crtc_state *crtc_state = 18251 
to_intel_crtc_state(crtc->base.state);

		/* Throw away stale software state and start from a clean slate. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		/* During readout enable and active both track the hw pipe state. */
		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	/* Keep the cdclk state's active pipe mask in sync with the readout. */
	dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	intel_dpll_readout_hw_state(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			/* Link the encoder back to the crtc it is driving. */
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ?
to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/*
	 * Final pass: derive per-crtc mode, pixel rate, plane data rate and
	 * cdclk/voltage requirements from the state read out above.
	 */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
*/
			mode->private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		/* Cross-check the config we just read back for consistency. */
		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}

/*
 * Let each active encoder take references on the power domains it needs,
 * based on the crtc state read out from hardware.
 */
static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_crtc_state *crtc_state;

		if (!encoder->get_power_domains)
			continue;

		/*
		 * MST-primary and inactive encoders don't have a crtc state
		 * and neither of these require any power domain references.
		 */
		if (!encoder->base.crtc)
			continue;

		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
		encoder->get_power_domains(encoder, crtc_state);
	}
}

/*
 * Apply display workarounds that must be in place before any other
 * hardware state is touched; see the per-workaround comments below.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}

/*
 * If the BIOS left a disabled PCH HDMI port with a pipe select other
 * than pipe A, point it back at pipe A. See ibx_sanitize_pch_ports()
 * for the rationale.
 */
static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
				       enum port port, i915_reg_t hdmi_reg)
{
	u32 val = intel_de_read(dev_priv, hdmi_reg);

	/* Nothing to do if the port is enabled or already selects pipe A. */
	if (val & SDVO_ENABLE ||
	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "Sanitizing transcoder select for HDMI %c\n",
		    port_name(port));

	val &= ~SDVO_PIPE_SEL_MASK;
	val |= SDVO_PIPE_SEL(PIPE_A);

	intel_de_write(dev_priv, hdmi_reg, val);
}

/*
 * DP counterpart of ibx_sanitize_pch_hdmi_port(): force a disabled
 * PCH DP port's pipe select back to pipe A.
 */
static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
				     enum port port, i915_reg_t dp_reg)
{
	u32 val = intel_de_read(dev_priv, dp_reg);

	/* Nothing to do if the port is enabled or already selects pipe A. */
	if (val & DP_PORT_EN ||
	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "Sanitizing transcoder select for DP %c\n",
port_name(port));

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(PIPE_A);

	intel_de_write(dev_priv, dp_reg, val);
}

static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}

/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Hold POWER_DOMAIN_INIT across the whole readout+sanitize sequence. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess.
*/

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and where supported sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/*
		 * After sanitization no crtc should still be asking for extra
		 * power domains; warn and drop any that remain.
		 */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}

/*
 * Restore the atomic state saved at suspend time (if any), retrying the
 * modeset locking on deadlock. Consumes dev_priv->modeset_restore_state.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Take all modeset locks, backing off and retrying on deadlock. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

/* Cancel all outstanding hotplug-related work on every connector. */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd.
*/
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Drain any queued flip/modeset work before irqs go away. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}

/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized.
MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

/*
 * Snapshot of display register state captured alongside a GPU error:
 * per-pipe cursor/pipe/plane registers plus per-transcoder timings.
 */
struct intel_display_error_state {

	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false means the registers below were not read */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		/* true only for transcoders present on this platform */
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};

/*
 * Capture the current display error state. Runs in the error-capture
 * path, hence the GFP_ATOMIC allocation. Returns NULL if the display is
 * unavailable/disabled or the allocation fails; the caller owns (and
 * must free) the returned snapshot.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = intel_de_read(dev_priv,
							 HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		/* Skip all register reads for pipes whose power domain is off. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));

		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
		/* Which plane registers exist depends on the hardware generation. */
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = intel_de_read(dev_priv,
							     DSPSIZE(i));
			error->plane[i].pos = intel_de_read(dev_priv,
							    DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = intel_de_read(dev_priv,
							     DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = intel_de_read(dev_priv,
								DSPSURF(i));
			error->plane[i].tile_offset = intel_de_read(dev_priv,
								    DSPTILEOFF(i));
		}

		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = intel_de_read(dev_priv,
							    PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		/* Timing registers are only read when the transcoder has power. */
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = intel_de_read(dev_priv,
							  PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = intel_de_read(dev_priv,
							    HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = intel_de_read(dev_priv,
							    HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = intel_de_read(dev_priv,
							   HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = intel_de_read(dev_priv,
							    VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = intel_de_read(dev_priv,
							    VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = intel_de_read(dev_priv,
							   VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Pretty-print a previously captured display error state into the error
 * state buffer. A NULL error pointer is a no-op.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, " Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, " SRC: %08x\n", error->pipe[i].source);
		err_printf(m, " STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->plane[i].control);
		err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, " SIZE: %08x\n", error->plane[i].size);
err_printf(m, " POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, " SURF: %08x\n", error->plane[i].surface);
			err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, " POS: %08x\n", error->cursor[i].position);
		err_printf(m, " BASE: %08x\n", error->cursor[i].base);
	}

	/* Skip transcoders that were not present/captured on this platform. */
	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, " Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

#endif