/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 */
22 * 23 */ 24 25 #include "intel_color.h" 26 #include "intel_de.h" 27 #include "intel_display_types.h" 28 29 #define CTM_COEFF_SIGN (1ULL << 63) 30 31 #define CTM_COEFF_1_0 (1ULL << 32) 32 #define CTM_COEFF_2_0 (CTM_COEFF_1_0 << 1) 33 #define CTM_COEFF_4_0 (CTM_COEFF_2_0 << 1) 34 #define CTM_COEFF_8_0 (CTM_COEFF_4_0 << 1) 35 #define CTM_COEFF_0_5 (CTM_COEFF_1_0 >> 1) 36 #define CTM_COEFF_0_25 (CTM_COEFF_0_5 >> 1) 37 #define CTM_COEFF_0_125 (CTM_COEFF_0_25 >> 1) 38 39 #define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255) 40 41 #define CTM_COEFF_NEGATIVE(coeff) (((coeff) & CTM_COEFF_SIGN) != 0) 42 #define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1)) 43 44 #define LEGACY_LUT_LENGTH 256 45 46 /* 47 * ILK+ csc matrix: 48 * 49 * |R/Cr| | c0 c1 c2 | ( |R/Cr| |preoff0| ) |postoff0| 50 * |G/Y | = | c3 c4 c5 | x ( |G/Y | + |preoff1| ) + |postoff1| 51 * |B/Cb| | c6 c7 c8 | ( |B/Cb| |preoff2| ) |postoff2| 52 * 53 * ILK/SNB don't have explicit post offsets, and instead 54 * CSC_MODE_YUV_TO_RGB and CSC_BLACK_SCREEN_OFFSET are used: 55 * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=0 -> 1/2, 0, 1/2 56 * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/2, 1/16, 1/2 57 * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=0 -> 0, 0, 0 58 * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/16, 1/16, 1/16 59 */ 60 61 /* 62 * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point 63 * format). This macro takes the coefficient we want transformed and the 64 * number of fractional bits. 65 * 66 * We only have a 9 bits precision window which slides depending on the value 67 * of the CTM coefficient and we write the value from bit 3. We also round the 68 * value. 
69 */ 70 #define ILK_CSC_COEFF_FP(coeff, fbits) \ 71 (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8) 72 73 #define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0 74 #define ILK_CSC_COEFF_1_0 0x7800 75 76 #define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255) 77 78 /* Nop pre/post offsets */ 79 static const u16 ilk_csc_off_zero[3] = {}; 80 81 /* Identity matrix */ 82 static const u16 ilk_csc_coeff_identity[9] = { 83 ILK_CSC_COEFF_1_0, 0, 0, 84 0, ILK_CSC_COEFF_1_0, 0, 85 0, 0, ILK_CSC_COEFF_1_0, 86 }; 87 88 /* Limited range RGB post offsets */ 89 static const u16 ilk_csc_postoff_limited_range[3] = { 90 ILK_CSC_POSTOFF_LIMITED_RANGE, 91 ILK_CSC_POSTOFF_LIMITED_RANGE, 92 ILK_CSC_POSTOFF_LIMITED_RANGE, 93 }; 94 95 /* Full range RGB -> limited range RGB matrix */ 96 static const u16 ilk_csc_coeff_limited_range[9] = { 97 ILK_CSC_COEFF_LIMITED_RANGE, 0, 0, 98 0, ILK_CSC_COEFF_LIMITED_RANGE, 0, 99 0, 0, ILK_CSC_COEFF_LIMITED_RANGE, 100 }; 101 102 /* BT.709 full range RGB -> limited range YCbCr matrix */ 103 static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = { 104 0x1e08, 0x9cc0, 0xb528, 105 0x2ba8, 0x09d8, 0x37e8, 106 0xbce8, 0x9ad8, 0x1e08, 107 }; 108 109 /* Limited range YCbCr post offsets */ 110 static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = { 111 0x0800, 0x0100, 0x0800, 112 }; 113 114 static bool lut_is_legacy(const struct drm_property_blob *lut) 115 { 116 return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH; 117 } 118 119 static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state) 120 { 121 return !crtc_state->hw.degamma_lut && 122 !crtc_state->hw.ctm && 123 crtc_state->hw.gamma_lut && 124 lut_is_legacy(crtc_state->hw.gamma_lut); 125 } 126 127 /* 128 * When using limited range, multiply the matrix given by userspace by 129 * the matrix that we would use for the limited range. 
130 */ 131 static u64 *ctm_mult_by_limited(u64 *result, const u64 *input) 132 { 133 int i; 134 135 for (i = 0; i < 9; i++) { 136 u64 user_coeff = input[i]; 137 u32 limited_coeff = CTM_COEFF_LIMITED_RANGE; 138 u32 abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0, 139 CTM_COEFF_4_0 - 1) >> 2; 140 141 /* 142 * By scaling every co-efficient with limited range (16-235) 143 * vs full range (0-255) the final o/p will be scaled down to 144 * fit in the limited range supported by the panel. 145 */ 146 result[i] = mul_u32_u32(limited_coeff, abs_coeff) >> 30; 147 result[i] |= user_coeff & CTM_COEFF_SIGN; 148 } 149 150 return result; 151 } 152 153 static void ilk_update_pipe_csc(struct intel_crtc *crtc, 154 const u16 preoff[3], 155 const u16 coeff[9], 156 const u16 postoff[3]) 157 { 158 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 159 enum pipe pipe = crtc->pipe; 160 161 intel_de_write(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]); 162 intel_de_write(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]); 163 intel_de_write(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]); 164 165 intel_de_write(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe), 166 coeff[0] << 16 | coeff[1]); 167 intel_de_write(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16); 168 169 intel_de_write(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe), 170 coeff[3] << 16 | coeff[4]); 171 intel_de_write(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16); 172 173 intel_de_write(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe), 174 coeff[6] << 16 | coeff[7]); 175 intel_de_write(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16); 176 177 if (DISPLAY_VER(dev_priv) >= 7) { 178 intel_de_write(dev_priv, PIPE_CSC_POSTOFF_HI(pipe), 179 postoff[0]); 180 intel_de_write(dev_priv, PIPE_CSC_POSTOFF_ME(pipe), 181 postoff[1]); 182 intel_de_write(dev_priv, PIPE_CSC_POSTOFF_LO(pipe), 183 postoff[2]); 184 } 185 } 186 187 static void icl_update_output_csc(struct intel_crtc *crtc, 188 const u16 preoff[3], 189 const u16 coeff[9], 190 const u16 postoff[3]) 191 { 
192 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 193 enum pipe pipe = crtc->pipe; 194 195 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]); 196 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]); 197 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]); 198 199 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), 200 coeff[0] << 16 | coeff[1]); 201 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe), 202 coeff[2] << 16); 203 204 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), 205 coeff[3] << 16 | coeff[4]); 206 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe), 207 coeff[5] << 16); 208 209 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), 210 coeff[6] << 16 | coeff[7]); 211 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe), 212 coeff[8] << 16); 213 214 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]); 215 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]); 216 intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]); 217 } 218 219 static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state) 220 { 221 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 222 223 /* 224 * FIXME if there's a gamma LUT after the CSC, we should 225 * do the range compression using the gamma LUT instead. 
226 */ 227 return crtc_state->limited_color_range && 228 (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || 229 IS_DISPLAY_VER(dev_priv, 9, 10)); 230 } 231 232 static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, 233 u16 coeffs[9]) 234 { 235 const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; 236 const u64 *input; 237 u64 temp[9]; 238 int i; 239 240 if (ilk_csc_limited_range(crtc_state)) 241 input = ctm_mult_by_limited(temp, ctm->matrix); 242 else 243 input = ctm->matrix; 244 245 /* 246 * Convert fixed point S31.32 input to format supported by the 247 * hardware. 248 */ 249 for (i = 0; i < 9; i++) { 250 u64 abs_coeff = ((1ULL << 63) - 1) & input[i]; 251 252 /* 253 * Clamp input value to min/max supported by 254 * hardware. 255 */ 256 abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1); 257 258 coeffs[i] = 0; 259 260 /* sign bit */ 261 if (CTM_COEFF_NEGATIVE(input[i])) 262 coeffs[i] |= 1 << 15; 263 264 if (abs_coeff < CTM_COEFF_0_125) 265 coeffs[i] |= (3 << 12) | 266 ILK_CSC_COEFF_FP(abs_coeff, 12); 267 else if (abs_coeff < CTM_COEFF_0_25) 268 coeffs[i] |= (2 << 12) | 269 ILK_CSC_COEFF_FP(abs_coeff, 11); 270 else if (abs_coeff < CTM_COEFF_0_5) 271 coeffs[i] |= (1 << 12) | 272 ILK_CSC_COEFF_FP(abs_coeff, 10); 273 else if (abs_coeff < CTM_COEFF_1_0) 274 coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9); 275 else if (abs_coeff < CTM_COEFF_2_0) 276 coeffs[i] |= (7 << 12) | 277 ILK_CSC_COEFF_FP(abs_coeff, 8); 278 else 279 coeffs[i] |= (6 << 12) | 280 ILK_CSC_COEFF_FP(abs_coeff, 7); 281 } 282 } 283 284 static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) 285 { 286 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 287 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 288 bool limited_color_range = ilk_csc_limited_range(crtc_state); 289 290 if (crtc_state->hw.ctm) { 291 u16 coeff[9]; 292 293 ilk_csc_convert_ctm(crtc_state, coeff); 294 ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff, 295 
limited_color_range ? 296 ilk_csc_postoff_limited_range : 297 ilk_csc_off_zero); 298 } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { 299 ilk_update_pipe_csc(crtc, ilk_csc_off_zero, 300 ilk_csc_coeff_rgb_to_ycbcr, 301 ilk_csc_postoff_rgb_to_ycbcr); 302 } else if (limited_color_range) { 303 ilk_update_pipe_csc(crtc, ilk_csc_off_zero, 304 ilk_csc_coeff_limited_range, 305 ilk_csc_postoff_limited_range); 306 } else if (crtc_state->csc_enable) { 307 /* 308 * On GLK+ both pipe CSC and degamma LUT are controlled 309 * by csc_enable. Hence for the cases where the degama 310 * LUT is needed but CSC is not we need to load an 311 * identity matrix. 312 */ 313 drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) && 314 !IS_GEMINILAKE(dev_priv)); 315 316 ilk_update_pipe_csc(crtc, ilk_csc_off_zero, 317 ilk_csc_coeff_identity, 318 ilk_csc_off_zero); 319 } 320 321 intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe), 322 crtc_state->csc_mode); 323 } 324 325 static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) 326 { 327 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 328 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 329 330 if (crtc_state->hw.ctm) { 331 u16 coeff[9]; 332 333 ilk_csc_convert_ctm(crtc_state, coeff); 334 ilk_update_pipe_csc(crtc, ilk_csc_off_zero, 335 coeff, ilk_csc_off_zero); 336 } 337 338 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { 339 icl_update_output_csc(crtc, ilk_csc_off_zero, 340 ilk_csc_coeff_rgb_to_ycbcr, 341 ilk_csc_postoff_rgb_to_ycbcr); 342 } else if (crtc_state->limited_color_range) { 343 icl_update_output_csc(crtc, ilk_csc_off_zero, 344 ilk_csc_coeff_limited_range, 345 ilk_csc_postoff_limited_range); 346 } 347 348 intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe), 349 crtc_state->csc_mode); 350 } 351 352 static void chv_load_cgm_csc(struct intel_crtc *crtc, 353 const struct drm_property_blob *blob) 354 { 355 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 
356 const struct drm_color_ctm *ctm = blob->data; 357 enum pipe pipe = crtc->pipe; 358 u16 coeffs[9]; 359 int i; 360 361 for (i = 0; i < ARRAY_SIZE(coeffs); i++) { 362 u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i]; 363 364 /* Round coefficient. */ 365 abs_coeff += 1 << (32 - 13); 366 /* Clamp to hardware limits. */ 367 abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1); 368 369 coeffs[i] = 0; 370 371 /* Write coefficients in S3.12 format. */ 372 if (ctm->matrix[i] & (1ULL << 63)) 373 coeffs[i] |= 1 << 15; 374 375 coeffs[i] |= ((abs_coeff >> 32) & 7) << 12; 376 coeffs[i] |= (abs_coeff >> 20) & 0xfff; 377 } 378 379 intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe), 380 coeffs[1] << 16 | coeffs[0]); 381 intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe), 382 coeffs[3] << 16 | coeffs[2]); 383 intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe), 384 coeffs[5] << 16 | coeffs[4]); 385 intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe), 386 coeffs[7] << 16 | coeffs[6]); 387 intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe), 388 coeffs[8]); 389 } 390 391 /* convert hw value with given bit_precision to lut property val */ 392 static u32 intel_color_lut_pack(u32 val, int bit_precision) 393 { 394 u32 max = 0xffff >> (16 - bit_precision); 395 396 val = clamp_val(val, 0, max); 397 398 if (bit_precision < 16) 399 val <<= 16 - bit_precision; 400 401 return val; 402 } 403 404 static u32 i9xx_lut_8(const struct drm_color_lut *color) 405 { 406 return drm_color_lut_extract(color->red, 8) << 16 | 407 drm_color_lut_extract(color->green, 8) << 8 | 408 drm_color_lut_extract(color->blue, 8); 409 } 410 411 static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val) 412 { 413 entry->red = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_RED_MASK, val), 8); 414 entry->green = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_GREEN_MASK, val), 8); 415 entry->blue = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_BLUE_MASK, val), 8); 416 } 417 418 /* i965+ "10.6" bit interpolated 
format "even DW" (low 8 bits) */ 419 static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color) 420 { 421 return (color->red & 0xff) << 16 | 422 (color->green & 0xff) << 8 | 423 (color->blue & 0xff); 424 } 425 426 /* i965+ "10.6" interpolated format "odd DW" (high 8 bits) */ 427 static u32 i965_lut_10p6_udw(const struct drm_color_lut *color) 428 { 429 return (color->red >> 8) << 16 | 430 (color->green >> 8) << 8 | 431 (color->blue >> 8); 432 } 433 434 static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) 435 { 436 entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 | 437 REG_FIELD_GET(PALETTE_RED_MASK, ldw); 438 entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 | 439 REG_FIELD_GET(PALETTE_GREEN_MASK, ldw); 440 entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 | 441 REG_FIELD_GET(PALETTE_BLUE_MASK, ldw); 442 } 443 444 static u16 i965_lut_11p6_max_pack(u32 val) 445 { 446 /* PIPEGCMAX is 11.6, clamp to 10.6 */ 447 return clamp_val(val, 0, 0xffff); 448 } 449 450 static u32 ilk_lut_10(const struct drm_color_lut *color) 451 { 452 return drm_color_lut_extract(color->red, 10) << 20 | 453 drm_color_lut_extract(color->green, 10) << 10 | 454 drm_color_lut_extract(color->blue, 10); 455 } 456 457 static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val) 458 { 459 entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_RED_MASK, val), 10); 460 entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_GREEN_MASK, val), 10); 461 entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10); 462 } 463 464 static void icl_lut_multi_seg_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) 465 { 466 entry->red = REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_UDW_MASK, udw) << 6 | 467 REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_LDW_MASK, ldw); 468 entry->green = REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_UDW_MASK, udw) << 6 | 469 REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_LDW_MASK, ldw); 470 entry->blue = 
REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_UDW_MASK, udw) << 6 | 471 REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_LDW_MASK, ldw); 472 } 473 474 static void i9xx_color_commit(const struct intel_crtc_state *crtc_state) 475 { 476 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 477 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 478 enum pipe pipe = crtc->pipe; 479 u32 val; 480 481 val = intel_de_read(dev_priv, PIPECONF(pipe)); 482 val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX; 483 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 484 intel_de_write(dev_priv, PIPECONF(pipe), val); 485 } 486 487 static void ilk_color_commit(const struct intel_crtc_state *crtc_state) 488 { 489 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 490 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 491 enum pipe pipe = crtc->pipe; 492 u32 val; 493 494 val = intel_de_read(dev_priv, PIPECONF(pipe)); 495 val &= ~PIPECONF_GAMMA_MODE_MASK_ILK; 496 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 497 intel_de_write(dev_priv, PIPECONF(pipe), val); 498 499 ilk_load_csc_matrix(crtc_state); 500 } 501 502 static void hsw_color_commit(const struct intel_crtc_state *crtc_state) 503 { 504 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 505 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 506 507 intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe), 508 crtc_state->gamma_mode); 509 510 ilk_load_csc_matrix(crtc_state); 511 } 512 513 static void skl_color_commit(const struct intel_crtc_state *crtc_state) 514 { 515 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 516 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 517 enum pipe pipe = crtc->pipe; 518 u32 val = 0; 519 520 /* 521 * We don't (yet) allow userspace to control the pipe background color, 522 * so force it to black, but apply pipe gamma and CSC appropriately 523 * so that its handling will match how we program our planes. 
524 */ 525 if (crtc_state->gamma_enable) 526 val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE; 527 if (crtc_state->csc_enable) 528 val |= SKL_BOTTOM_COLOR_CSC_ENABLE; 529 intel_de_write(dev_priv, SKL_BOTTOM_COLOR(pipe), val); 530 531 intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe), 532 crtc_state->gamma_mode); 533 534 if (DISPLAY_VER(dev_priv) >= 11) 535 icl_load_csc_matrix(crtc_state); 536 else 537 ilk_load_csc_matrix(crtc_state); 538 } 539 540 static void i9xx_load_lut_8(struct intel_crtc *crtc, 541 const struct drm_property_blob *blob) 542 { 543 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 544 const struct drm_color_lut *lut; 545 enum pipe pipe = crtc->pipe; 546 int i; 547 548 if (!blob) 549 return; 550 551 lut = blob->data; 552 553 for (i = 0; i < 256; i++) 554 intel_de_write(dev_priv, PALETTE(pipe, i), 555 i9xx_lut_8(&lut[i])); 556 } 557 558 static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) 559 { 560 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 561 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 562 const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; 563 564 assert_pll_enabled(dev_priv, crtc->pipe); 565 566 i9xx_load_lut_8(crtc, gamma_lut); 567 } 568 569 static void i965_load_lut_10p6(struct intel_crtc *crtc, 570 const struct drm_property_blob *blob) 571 { 572 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 573 const struct drm_color_lut *lut = blob->data; 574 int i, lut_size = drm_color_lut_size(blob); 575 enum pipe pipe = crtc->pipe; 576 577 for (i = 0; i < lut_size - 1; i++) { 578 intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0), 579 i965_lut_10p6_ldw(&lut[i])); 580 intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1), 581 i965_lut_10p6_udw(&lut[i])); 582 } 583 584 intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red); 585 intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green); 586 intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue); 587 } 588 589 static void 
i965_load_luts(const struct intel_crtc_state *crtc_state) 590 { 591 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 592 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 593 const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; 594 595 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) 596 assert_dsi_pll_enabled(dev_priv); 597 else 598 assert_pll_enabled(dev_priv, crtc->pipe); 599 600 if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) 601 i9xx_load_lut_8(crtc, gamma_lut); 602 else 603 i965_load_lut_10p6(crtc, gamma_lut); 604 } 605 606 static void ilk_load_lut_8(struct intel_crtc *crtc, 607 const struct drm_property_blob *blob) 608 { 609 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 610 const struct drm_color_lut *lut; 611 enum pipe pipe = crtc->pipe; 612 int i; 613 614 if (!blob) 615 return; 616 617 lut = blob->data; 618 619 for (i = 0; i < 256; i++) 620 intel_de_write(dev_priv, LGC_PALETTE(pipe, i), 621 i9xx_lut_8(&lut[i])); 622 } 623 624 static void ilk_load_lut_10(struct intel_crtc *crtc, 625 const struct drm_property_blob *blob) 626 { 627 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 628 const struct drm_color_lut *lut = blob->data; 629 int i, lut_size = drm_color_lut_size(blob); 630 enum pipe pipe = crtc->pipe; 631 632 for (i = 0; i < lut_size; i++) 633 intel_de_write(dev_priv, PREC_PALETTE(pipe, i), 634 ilk_lut_10(&lut[i])); 635 } 636 637 static void ilk_load_luts(const struct intel_crtc_state *crtc_state) 638 { 639 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 640 const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; 641 642 switch (crtc_state->gamma_mode) { 643 case GAMMA_MODE_MODE_8BIT: 644 ilk_load_lut_8(crtc, gamma_lut); 645 break; 646 case GAMMA_MODE_MODE_10BIT: 647 ilk_load_lut_10(crtc, gamma_lut); 648 break; 649 default: 650 MISSING_CASE(crtc_state->gamma_mode); 651 break; 652 } 653 } 654 655 static int ivb_lut_10_size(u32 prec_index) 656 { 
657 if (prec_index & PAL_PREC_SPLIT_MODE) 658 return 512; 659 else 660 return 1024; 661 } 662 663 /* 664 * IVB/HSW Bspec / PAL_PREC_INDEX: 665 * "Restriction : Index auto increment mode is not 666 * supported and must not be enabled." 667 */ 668 static void ivb_load_lut_10(struct intel_crtc *crtc, 669 const struct drm_property_blob *blob, 670 u32 prec_index) 671 { 672 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 673 int hw_lut_size = ivb_lut_10_size(prec_index); 674 const struct drm_color_lut *lut = blob->data; 675 int i, lut_size = drm_color_lut_size(blob); 676 enum pipe pipe = crtc->pipe; 677 678 for (i = 0; i < hw_lut_size; i++) { 679 /* We discard half the user entries in split gamma mode */ 680 const struct drm_color_lut *entry = 681 &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; 682 683 intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++); 684 intel_de_write(dev_priv, PREC_PAL_DATA(pipe), 685 ilk_lut_10(entry)); 686 } 687 688 /* 689 * Reset the index, otherwise it prevents the legacy palette to be 690 * written properly. 
691 */ 692 intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); 693 } 694 695 /* On BDW+ the index auto increment mode actually works */ 696 static void bdw_load_lut_10(struct intel_crtc *crtc, 697 const struct drm_property_blob *blob, 698 u32 prec_index) 699 { 700 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 701 int hw_lut_size = ivb_lut_10_size(prec_index); 702 const struct drm_color_lut *lut = blob->data; 703 int i, lut_size = drm_color_lut_size(blob); 704 enum pipe pipe = crtc->pipe; 705 706 intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 707 prec_index | PAL_PREC_AUTO_INCREMENT); 708 709 for (i = 0; i < hw_lut_size; i++) { 710 /* We discard half the user entries in split gamma mode */ 711 const struct drm_color_lut *entry = 712 &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; 713 714 intel_de_write(dev_priv, PREC_PAL_DATA(pipe), 715 ilk_lut_10(entry)); 716 } 717 718 /* 719 * Reset the index, otherwise it prevents the legacy palette to be 720 * written properly. 721 */ 722 intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); 723 } 724 725 static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state) 726 { 727 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 728 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 729 enum pipe pipe = crtc->pipe; 730 731 /* Program the max register to clamp values > 1.0. */ 732 intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16); 733 intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16); 734 intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16); 735 736 /* 737 * Program the gc max 2 register to clamp values > 1.0. 
738 * ToDo: Extend the ABI to be able to program values 739 * from 3.0 to 7.0 740 */ 741 if (DISPLAY_VER(dev_priv) >= 10) { 742 intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0), 743 1 << 16); 744 intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1), 745 1 << 16); 746 intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2), 747 1 << 16); 748 } 749 } 750 751 static void ivb_load_luts(const struct intel_crtc_state *crtc_state) 752 { 753 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 754 const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; 755 const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; 756 const struct drm_property_blob *blob = gamma_lut ?: degamma_lut; 757 758 switch (crtc_state->gamma_mode) { 759 case GAMMA_MODE_MODE_8BIT: 760 ilk_load_lut_8(crtc, blob); 761 break; 762 case GAMMA_MODE_MODE_SPLIT: 763 ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | 764 PAL_PREC_INDEX_VALUE(0)); 765 ivb_load_lut_ext_max(crtc_state); 766 ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE | 767 PAL_PREC_INDEX_VALUE(512)); 768 break; 769 case GAMMA_MODE_MODE_10BIT: 770 ivb_load_lut_10(crtc, blob, 771 PAL_PREC_INDEX_VALUE(0)); 772 ivb_load_lut_ext_max(crtc_state); 773 break; 774 default: 775 MISSING_CASE(crtc_state->gamma_mode); 776 break; 777 } 778 } 779 780 static void bdw_load_luts(const struct intel_crtc_state *crtc_state) 781 { 782 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 783 const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; 784 const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; 785 const struct drm_property_blob *blob = gamma_lut ?: degamma_lut; 786 787 switch (crtc_state->gamma_mode) { 788 case GAMMA_MODE_MODE_8BIT: 789 ilk_load_lut_8(crtc, blob); 790 break; 791 case GAMMA_MODE_MODE_SPLIT: 792 bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | 793 PAL_PREC_INDEX_VALUE(0)); 794 ivb_load_lut_ext_max(crtc_state); 
795 bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE | 796 PAL_PREC_INDEX_VALUE(512)); 797 break; 798 case GAMMA_MODE_MODE_10BIT: 799 800 bdw_load_lut_10(crtc, blob, 801 PAL_PREC_INDEX_VALUE(0)); 802 ivb_load_lut_ext_max(crtc_state); 803 break; 804 default: 805 MISSING_CASE(crtc_state->gamma_mode); 806 break; 807 } 808 } 809 810 static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) 811 { 812 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 813 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 814 enum pipe pipe = crtc->pipe; 815 int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; 816 const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data; 817 818 /* 819 * When setting the auto-increment bit, the hardware seems to 820 * ignore the index bits, so we need to reset it to index 0 821 * separately. 822 */ 823 intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 824 intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 825 PRE_CSC_GAMC_AUTO_INCREMENT); 826 827 for (i = 0; i < lut_size; i++) { 828 /* 829 * First 33 entries represent range from 0 to 1.0 830 * 34th and 35th entry will represent extended range 831 * inputs 3.0 and 7.0 respectively, currently clamped 832 * at 1.0. Since the precision is 16bit, the user 833 * value can be directly filled to register. 834 * The pipe degamma table in GLK+ onwards doesn't 835 * support different values per channel, so this just 836 * programs green value which will be equal to Red and 837 * Blue into the lut registers. 838 * ToDo: Extend to max 7.0. Enable 32 bit input value 839 * as compared to just 16 to achieve this. 840 */ 841 intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 842 lut[i].green); 843 } 844 845 /* Clamp values > 1.0. 
*/ 846 while (i++ < 35) 847 intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); 848 849 intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 850 } 851 852 static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state) 853 { 854 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 855 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 856 enum pipe pipe = crtc->pipe; 857 int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; 858 859 /* 860 * When setting the auto-increment bit, the hardware seems to 861 * ignore the index bits, so we need to reset it to index 0 862 * separately. 863 */ 864 intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 865 intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 866 PRE_CSC_GAMC_AUTO_INCREMENT); 867 868 for (i = 0; i < lut_size; i++) { 869 u32 v = (i << 16) / (lut_size - 1); 870 871 intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v); 872 } 873 874 /* Clamp values > 1.0. */ 875 while (i++ < 35) 876 intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); 877 878 intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 879 } 880 881 static void glk_load_luts(const struct intel_crtc_state *crtc_state) 882 { 883 const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; 884 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 885 886 /* 887 * On GLK+ both pipe CSC and degamma LUT are controlled 888 * by csc_enable. Hence for the cases where the CSC is 889 * needed but degamma LUT is not we need to load a 890 * linear degamma LUT. In fact we'll just always load 891 * the degama LUT so that we don't have to reload 892 * it every time the pipe CSC is being enabled. 
893 */ 894 if (crtc_state->hw.degamma_lut) 895 glk_load_degamma_lut(crtc_state); 896 else 897 glk_load_degamma_lut_linear(crtc_state); 898 899 switch (crtc_state->gamma_mode) { 900 case GAMMA_MODE_MODE_8BIT: 901 ilk_load_lut_8(crtc, gamma_lut); 902 break; 903 case GAMMA_MODE_MODE_10BIT: 904 bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); 905 ivb_load_lut_ext_max(crtc_state); 906 break; 907 default: 908 MISSING_CASE(crtc_state->gamma_mode); 909 break; 910 } 911 } 912 913 /* ilk+ "12.4" interpolated format (high 10 bits) */ 914 static u32 ilk_lut_12p4_udw(const struct drm_color_lut *color) 915 { 916 return (color->red >> 6) << 20 | (color->green >> 6) << 10 | 917 (color->blue >> 6); 918 } 919 920 /* ilk+ "12.4" interpolated format (low 6 bits) */ 921 static u32 ilk_lut_12p4_ldw(const struct drm_color_lut *color) 922 { 923 return (color->red & 0x3f) << 24 | (color->green & 0x3f) << 14 | 924 (color->blue & 0x3f) << 4; 925 } 926 927 static void 928 icl_load_gcmax(const struct intel_crtc_state *crtc_state, 929 const struct drm_color_lut *color) 930 { 931 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 932 enum pipe pipe = crtc->pipe; 933 934 /* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */ 935 intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red); 936 intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green); 937 intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue); 938 } 939 940 static void 941 icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) 942 { 943 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 944 const struct drm_property_blob *blob = crtc_state->hw.gamma_lut; 945 const struct drm_color_lut *lut = blob->data; 946 enum pipe pipe = crtc->pipe; 947 int i; 948 949 /* 950 * Program Super Fine segment (let's call it seg1)... 
	 *
	 * Super Fine segment's step is 1/(8 * 128 * 256) and it has
	 * 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
	 * 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
	 */
	intel_dsb_reg_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
			    PAL_PREC_AUTO_INCREMENT);

	/* Each LUT entry needs two writes: low 6 bits first, then high 10. */
	for (i = 0; i < 9; i++) {
		const struct drm_color_lut *entry = &lut[i];

		intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
					    ilk_lut_12p4_ldw(entry));
		intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
					    ilk_lut_12p4_udw(entry));
	}
}

/*
 * Program the "fine" and "coarse" segments of the icl+ multi-segment
 * gamma LUT, plus the GCMAX top entry, via the DSB.
 */
static void
icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
	const struct drm_color_lut *lut = blob->data;
	const struct drm_color_lut *entry;
	enum pipe pipe = crtc->pipe;
	int i;

	/*
	 * Program Fine segment (let's call it seg2)...
	 *
	 * Fine segment's step is 1/(128 * 256) i.e. 1/(128 * 256), 2/(128 * 256)
	 * ... 256/(128 * 256). So in order to program fine segment of LUT we
	 * need to pick every 8th entry in the LUT, and program 256 indexes.
	 *
	 * PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
	 * seg2[0] being unused by the hardware.
	 */
	intel_dsb_reg_write(crtc_state, PREC_PAL_INDEX(pipe),
			    PAL_PREC_AUTO_INCREMENT);
	for (i = 1; i < 257; i++) {
		entry = &lut[i * 8];
		intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
					    ilk_lut_12p4_ldw(entry));
		intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
					    ilk_lut_12p4_udw(entry));
	}

	/*
	 * Program Coarse segment (let's call it seg3)...
	 *
	 * Coarse segment starts from index 0 and its step is 1/256 i.e. 0,
	 * 1/256, 2/256 ... 256/256. As per the description of each entry in LUT
	 * above, we need to pick every (8 * 128)th entry in LUT, and
	 * program 256 of those.
	 *
	 * Spec is not very clear about if entries seg3[0] and seg3[1] are
	 * being used or not, but we still need to program these to advance
	 * the index.
	 */
	for (i = 0; i < 256; i++) {
		entry = &lut[i * 8 * 128];
		intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
					    ilk_lut_12p4_ldw(entry));
		intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
					    ilk_lut_12p4_udw(entry));
	}

	/* The last entry in the LUT is to be programmed in GCMAX */
	entry = &lut[256 * 8 * 128];
	icl_load_gcmax(crtc_state, entry);
	ivb_load_lut_ext_max(crtc_state);
}

/* Load the degamma and gamma LUTs for ICL+, then kick off the DSB batch. */
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (crtc_state->hw.degamma_lut)
		glk_load_degamma_lut(crtc_state);

	switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
	case GAMMA_MODE_MODE_8BIT:
		ilk_load_lut_8(crtc, gamma_lut);
		break;
	case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
		icl_program_gamma_superfine_segment(crtc_state);
		icl_program_gamma_multi_segment(crtc_state);
		break;
	case GAMMA_MODE_MODE_10BIT:
		bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
		ivb_load_lut_ext_max(crtc_state);
		break;
	default:
		MISSING_CASE(crtc_state->gamma_mode);
		break;
	}

	/* Submit all the queued DSB register writes. */
	intel_dsb_commit(crtc_state);
}

/* CHV CGM degamma: green/blue in the low dword, 14 bits per channel. */
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
{
	return drm_color_lut_extract(color->green, 14) << 16 |
		drm_color_lut_extract(color->blue, 14);
}

/* CHV CGM degamma: red alone in the high dword. */
static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
{
	return drm_color_lut_extract(color->red, 14);
}

/* Write the userspace degamma LUT into the CHV CGM degamma registers. */
static void chv_load_cgm_degamma(struct intel_crtc *crtc,
				 const struct drm_property_blob *blob)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_color_lut *lut = blob->data;
	int i, lut_size = drm_color_lut_size(blob);
	enum pipe pipe = crtc->pipe;

	for (i = 0; i < lut_size; i++) {
		intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0),
			       chv_cgm_degamma_ldw(&lut[i]));
		intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1),
			       chv_cgm_degamma_udw(&lut[i]));
	}
}

/* CHV CGM gamma: green/blue in the low dword, 10 bits per channel. */
static u32 chv_cgm_gamma_ldw(const struct drm_color_lut *color)
{
	return drm_color_lut_extract(color->green, 10) << 16 |
		drm_color_lut_extract(color->blue, 10);
}

/* CHV CGM gamma: red alone in the high dword. */
static u32 chv_cgm_gamma_udw(const struct drm_color_lut *color)
{
	return drm_color_lut_extract(color->red, 10);
}

/* Inverse of chv_cgm_gamma_ldw/udw: unpack a register pair into a LUT entry. */
static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
{
	entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10);
	entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10);
	entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10);
}

/* Write the userspace gamma LUT into the CHV CGM gamma registers. */
static void chv_load_cgm_gamma(struct intel_crtc *crtc,
			       const struct drm_property_blob *blob)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_color_lut *lut = blob->data;
	int i, lut_size = drm_color_lut_size(blob);
	enum pipe pipe = crtc->pipe;

	for (i = 0; i < lut_size; i++) {
		intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0),
			       chv_cgm_gamma_ldw(&lut[i]));
		intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1),
			       chv_cgm_gamma_udw(&lut[i]));
	}
}

/*
 * Load the CHV CGM CSC/degamma/gamma blocks per cgm_mode, falling back
 * to the legacy i965 pipe gamma when the CGM gamma is not in use, then
 * latch the whole configuration by writing CGM_PIPE_MODE last.
 */
static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
	const struct drm_property_blob *ctm = crtc_state->hw.ctm;

	if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
		chv_load_cgm_csc(crtc, ctm);

	if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
		chv_load_cgm_degamma(crtc, degamma_lut);

	if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
		chv_load_cgm_gamma(crtc, gamma_lut);
	else
		i965_load_luts(crtc_state);

	intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe),
		       crtc_state->cgm_mode);
}

/* Platform-dispatch entry point: load the LUTs for this crtc state. */
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	dev_priv->display.load_luts(crtc_state);
}

/* Platform-dispatch entry point: commit the color configuration. */
void intel_color_commit(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	dev_priv->display.color_commit(crtc_state);
}

/*
 * Generic check: the LUTs may be preloaded (before vblank) only when the
 * old state had neither gamma nor degamma LUT active.
 */
static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	return !old_crtc_state->hw.gamma_lut &&
		!old_crtc_state->hw.degamma_lut;
}

/* CHV variant of the preload check; see the CGM_PIPE_MODE note below. */
static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	/*
	 * CGM_PIPE_MODE is itself single buffered. We'd have to
	 * somehow split it out from chv_load_luts() if we wanted
	 * the ability to preload the CGM LUTs/CSC without tearing.
	 */
	if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
		return false;

	return !old_crtc_state->hw.gamma_lut;
}

/* GLK variant of the preload check. */
static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	/*
	 * The hardware degamma is active whenever the pipe
	 * CSC is active. Thus even if the old state has no
	 * software degamma we need to avoid clobbering the
	 * linear hardware degamma mid scanout.
	 */
	return !old_crtc_state->csc_enable &&
		!old_crtc_state->hw.gamma_lut;
}

/* Platform-dispatch entry point: validate the color state during atomic check. */
int intel_color_check(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	return dev_priv->display.color_check(crtc_state);
}

/* Read the current hardware LUT state back into crtc_state (if supported). */
void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (dev_priv->display.read_luts)
		dev_priv->display.read_luts(crtc_state);
}

/* Does this plane need to be updated when gamma/csc enables change? */
static bool need_plane_update(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	/*
	 * On pre-SKL the pipe gamma enable and pipe csc enable for
	 * the pipe bottom color are configured via the primary plane.
	 * We have to reconfigure that even if the plane is inactive.
	 */
	return crtc_state->active_planes & BIT(plane->id) ||
		(DISPLAY_VER(dev_priv) < 9 &&
		 plane->id == PLANE_PRIMARY);
}

/*
 * Pull the planes that must be reprogrammed into the atomic state when
 * the pipe gamma/csc enables change (no-op on a full modeset, which
 * reprograms everything anyway).
 */
static int
intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_plane *plane;

	if (!new_crtc_state->hw.active ||
	    drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
		return 0;

	if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
	    new_crtc_state->csc_enable == old_crtc_state->csc_enable)
		return 0;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;

		if (!need_plane_update(plane, new_crtc_state))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane->id);
	}

	return 0;
}

/* Validate a userspace LUT blob length; a missing blob is acceptable. */
static int check_lut_size(const struct drm_property_blob *lut, int expected)
{
	int len;

	if (!lut)
		return 0;

	len = drm_color_lut_size(lut);
	if (len != expected) {
		DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
			      len, expected);
		return -EINVAL;
	}

	return 0;
}

/* Validate the sizes and contents of the userspace gamma/degamma LUTs. */
static int check_luts(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
	const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
	int gamma_length,
	    degamma_length;
	u32 gamma_tests, degamma_tests;

	/* Always allow legacy gamma LUT with no further checking. */
	if (crtc_state_is_legacy_gamma(crtc_state))
		return 0;

	/* C8 relies on its palette being stored in the legacy LUT */
	if (crtc_state->c8_planes) {
		drm_dbg_kms(&dev_priv->drm,
			    "C8 pixelformat requires the legacy LUT\n");
		return -EINVAL;
	}

	degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
	gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
	degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
	gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;

	if (check_lut_size(degamma_lut, degamma_length) ||
	    check_lut_size(gamma_lut, gamma_length))
		return -EINVAL;

	if (drm_color_lut_check(degamma_lut, degamma_tests) ||
	    drm_color_lut_check(gamma_lut, gamma_tests))
		return -EINVAL;

	return 0;
}

/* Pick the hardware gamma mode for i9xx/i965 style pipe gamma. */
static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->gamma_enable ||
	    crtc_state_is_legacy_gamma(crtc_state))
		return GAMMA_MODE_MODE_8BIT;
	else
		return GAMMA_MODE_MODE_10BIT; /* i965+ only */
}

/* Atomic color check for i9xx platforms. */
static int i9xx_color_check(struct intel_crtc_state *crtc_state)
{
	int ret;

	ret = check_luts(crtc_state);
	if (ret)
		return ret;

	crtc_state->gamma_enable =
		crtc_state->hw.gamma_lut &&
		!crtc_state->c8_planes;

	crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);

	ret = intel_color_add_affected_planes(crtc_state);
	if (ret)
		return ret;

	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);

	return 0;
}

/*
 * Compute the CGM_PIPE_MODE enable bits from the userspace color
 * properties. Legacy gamma bypasses the CGM block entirely.
 */
static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
{
	u32 cgm_mode = 0;

	if (crtc_state_is_legacy_gamma(crtc_state))
		return 0;

	if (crtc_state->hw.degamma_lut)
		cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
	if (crtc_state->hw.ctm)
		cgm_mode |= CGM_PIPE_MODE_CSC;
	if (crtc_state->hw.gamma_lut)
		cgm_mode |= CGM_PIPE_MODE_GAMMA;

	return cgm_mode;
}

/*
 * CHV color pipeline:
 * u0.10 -> CGM degamma -> u0.14 -> CGM csc -> u0.14 -> CGM gamma ->
 * u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10
 *
 * We always bypass the WGC csc and use the CGM csc
 * instead since it has degamma and better precision.
 */
static int chv_color_check(struct intel_crtc_state *crtc_state)
{
	int ret;

	ret = check_luts(crtc_state);
	if (ret)
		return ret;

	/*
	 * Pipe gamma will be used only for the legacy LUT.
	 * Otherwise we bypass it and use the CGM gamma instead.
	 */
	crtc_state->gamma_enable =
		crtc_state_is_legacy_gamma(crtc_state) &&
		!crtc_state->c8_planes;

	crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;

	crtc_state->cgm_mode = chv_cgm_mode(crtc_state);

	ret = intel_color_add_affected_planes(crtc_state);
	if (ret)
		return ret;

	crtc_state->preload_luts = chv_can_preload_luts(crtc_state);

	return 0;
}

/* Pick the hardware gamma mode for ILK/SNB. */
static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->gamma_enable ||
	    crtc_state_is_legacy_gamma(crtc_state))
		return GAMMA_MODE_MODE_8BIT;
	else
		return GAMMA_MODE_MODE_10BIT;
}

/* Compute the ILK/SNB CSC mode bits. */
static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state)
{
	/*
	 * CSC comes after the LUT in RGB->YCbCr mode.
	 * RGB->YCbCr needs the limited range offsets added to
	 * the output. RGB limited range output is handled by
	 * the hw automagically elsewhere.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return CSC_BLACK_SCREEN_OFFSET;

	return CSC_MODE_YUV_TO_RGB |
		CSC_POSITION_BEFORE_GAMMA;
}

/* Atomic color check for ILK/SNB. */
static int ilk_color_check(struct intel_crtc_state *crtc_state)
{
	int ret;

	ret = check_luts(crtc_state);
	if (ret)
		return ret;

	crtc_state->gamma_enable =
		crtc_state->hw.gamma_lut &&
		!crtc_state->c8_planes;

	/*
	 * We don't expose the ctm on ilk/snb currently, also RGB
	 * limited range output is handled by the hw automagically.
	 */
	crtc_state->csc_enable =
		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB;

	crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);

	crtc_state->csc_mode = ilk_csc_mode(crtc_state);

	ret = intel_color_add_affected_planes(crtc_state);
	if (ret)
		return ret;

	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);

	return 0;
}

/* Pick the hardware gamma mode for IVB-BDW (incl. split gamma). */
static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->gamma_enable ||
	    crtc_state_is_legacy_gamma(crtc_state))
		return GAMMA_MODE_MODE_8BIT;
	else if (crtc_state->hw.gamma_lut &&
		 crtc_state->hw.degamma_lut)
		return GAMMA_MODE_MODE_SPLIT;
	else
		return GAMMA_MODE_MODE_10BIT;
}

/* Compute the IVB+ CSC mode bits. */
static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
{
	bool limited_color_range = ilk_csc_limited_range(crtc_state);

	/*
	 * CSC comes after the LUT in degamma, RGB->YCbCr,
	 * and RGB full->limited range mode.
	 */
	if (crtc_state->hw.degamma_lut ||
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
	    limited_color_range)
		return 0;

	return CSC_POSITION_BEFORE_GAMMA;
}

/* Atomic color check for IVB-BDW. */
static int ivb_color_check(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool limited_color_range = ilk_csc_limited_range(crtc_state);
	int ret;

	ret = check_luts(crtc_state);
	if (ret)
		return ret;

	/* The single CSC unit cannot do YCbCr output and a user CTM at once. */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
	    crtc_state->hw.ctm) {
		drm_dbg_kms(&dev_priv->drm,
			    "YCBCR and CTM together are not possible\n");
		return -EINVAL;
	}

	crtc_state->gamma_enable =
		(crtc_state->hw.gamma_lut ||
		 crtc_state->hw.degamma_lut) &&
		!crtc_state->c8_planes;

	crtc_state->csc_enable =
		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
		crtc_state->hw.ctm || limited_color_range;

	crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);

	crtc_state->csc_mode = ivb_csc_mode(crtc_state);

	ret = intel_color_add_affected_planes(crtc_state);
	if (ret)
		return ret;

	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);

	return 0;
}

/* Pick the hardware gamma mode for GLK. */
static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->gamma_enable ||
	    crtc_state_is_legacy_gamma(crtc_state))
		return GAMMA_MODE_MODE_8BIT;
	else
		return GAMMA_MODE_MODE_10BIT;
}

/* Atomic color check for GLK. */
static int glk_color_check(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int ret;

	ret = check_luts(crtc_state);
	if (ret)
		return ret;

	/* The single CSC unit cannot do YCbCr output and a user CTM at once. */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
	    crtc_state->hw.ctm) {
		drm_dbg_kms(&dev_priv->drm,
			    "YCBCR and CTM together are not possible\n");
		return -EINVAL;
	}

	crtc_state->gamma_enable =
		crtc_state->hw.gamma_lut &&
		!crtc_state->c8_planes;

	/* On GLK+ degamma LUT is controlled by csc_enable */
	crtc_state->csc_enable =
		crtc_state->hw.degamma_lut ||
		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
		crtc_state->hw.ctm || crtc_state->limited_color_range;

	crtc_state->gamma_mode = glk_gamma_mode(crtc_state);

	crtc_state->csc_mode = 0;

	ret = intel_color_add_affected_planes(crtc_state);
	if (ret)
		return ret;

	crtc_state->preload_luts = glk_can_preload_luts(crtc_state);

	return 0;
}

/*
 * Compute the ICL+ GAMMA_MODE value: separate pre/post CSC gamma enable
 * bits plus the LUT mode in the low bits.
 */
static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
{
	u32 gamma_mode = 0;

	if (crtc_state->hw.degamma_lut)
		gamma_mode |= PRE_CSC_GAMMA_ENABLE;

	if (crtc_state->hw.gamma_lut &&
	    !crtc_state->c8_planes)
		gamma_mode |= POST_CSC_GAMMA_ENABLE;

	if (!crtc_state->hw.gamma_lut ||
	    crtc_state_is_legacy_gamma(crtc_state))
		gamma_mode |= GAMMA_MODE_MODE_8BIT;
	else
		gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED;

	return gamma_mode;
}

/* Compute the ICL+ CSC mode bits (separate CTM and output CSC units). */
static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
{
	u32 csc_mode = 0;

	if (crtc_state->hw.ctm)
		csc_mode |= ICL_CSC_ENABLE;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
	    crtc_state->limited_color_range)
		csc_mode |= ICL_OUTPUT_CSC_ENABLE;

	return csc_mode;
}

/* Atomic color check for ICL+. */
static int icl_color_check(struct intel_crtc_state *crtc_state)
{
	int ret;

	ret = check_luts(crtc_state);
	if (ret)
		return ret;

	crtc_state->gamma_mode = icl_gamma_mode(crtc_state);

	crtc_state->csc_mode = icl_csc_mode(crtc_state);

	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);

	return 0;
}

/*
 * Effective gamma bit depth for i9xx/i965. The 10bit mode reports 16
 * bits — presumably because the i965 10.6 readout path expands entries
 * to full 16 bit values; TODO confirm against i965_read_lut_10p6().
 */
static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->gamma_enable)
		return 0;

	switch (crtc_state->gamma_mode) {
	case GAMMA_MODE_MODE_8BIT:
		return 8;
	case GAMMA_MODE_MODE_10BIT:
		return 16;
	default:
		MISSING_CASE(crtc_state->gamma_mode);
		return 0;
	}
}

/* Effective gamma bit depth for ILK/SNB. */
static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->gamma_enable)
		return 0;

	/* Readout is only implemented for the CSC-before-gamma case. */
	if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
		return 0;

	switch (crtc_state->gamma_mode) {
	case GAMMA_MODE_MODE_8BIT:
		return 8;
	case GAMMA_MODE_MODE_10BIT:
		return 10;
	default:
		MISSING_CASE(crtc_state->gamma_mode);
		return 0;
	}
}

/* Effective gamma bit depth for CHV (CGM gamma or legacy pipe gamma). */
static int chv_gamma_precision(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
		return 10;
	else
		return i9xx_gamma_precision(crtc_state);
}

/* Effective gamma bit depth for GLK. */
static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->gamma_enable)
		return 0;

	switch (crtc_state->gamma_mode) {
	case GAMMA_MODE_MODE_8BIT:
		return 8;
	case GAMMA_MODE_MODE_10BIT:
		return 10;
	default:
		MISSING_CASE(crtc_state->gamma_mode);
		return 0;
	}
}

/* Effective gamma bit depth for ICL+ (post-CSC gamma only). */
static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
{
	if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
		return 0;

	switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
	case GAMMA_MODE_MODE_8BIT:
		return 8;
	case GAMMA_MODE_MODE_10BIT:
		return 10;
	case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
		return 16;
	default:
		MISSING_CASE(crtc_state->gamma_mode);
		return 0;
	}
}

/* Dispatch the per-platform gamma bit depth helper. */
int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return chv_gamma_precision(crtc_state);
		else
			return i9xx_gamma_precision(crtc_state);
	} else {
		if (DISPLAY_VER(dev_priv) >= 11)
			return icl_gamma_precision(crtc_state);
		else if (DISPLAY_VER(dev_priv) == 10)
			return glk_gamma_precision(crtc_state);
		else if (IS_IRONLAKE(dev_priv))
			return ilk_gamma_precision(crtc_state);
	}

	return 0;
}

/* Are two LUT entries equal within +/- err on every channel? */
static bool err_check(struct drm_color_lut *lut1,
		      struct drm_color_lut *lut2, u32 err)
{
	return ((abs((long)lut2->red - lut1->red)) <= err) &&
		((abs((long)lut2->blue - lut1->blue)) <= err) &&
		((abs((long)lut2->green - lut1->green)) <= err);
}

/* Compare the first lut_size entries of two LUTs within tolerance err. */
static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1,
					  struct drm_color_lut *lut2,
					  int lut_size, u32 err)
{
	int i;

	for (i = 0; i < lut_size; i++) {
		if (!err_check(&lut1[i], &lut2[i], err))
			return false;
	}

	return true;
}

/*
 * Compare a software LUT blob against a hardware-readout LUT blob,
 * allowing for the bit precision the hardware mode actually stores.
 * For the multi-segmented mode only the 9 superfine entries are
 * compared — presumably because readout of the other segments is not
 * implemented; TODO confirm.
 */
bool intel_color_lut_equal(struct drm_property_blob *blob1,
			   struct drm_property_blob *blob2,
			   u32 gamma_mode, u32 bit_precision)
{
	struct drm_color_lut *lut1, *lut2;
	int lut_size1, lut_size2;
	u32 err;

	/* Exactly one blob missing -> not equal; both missing -> equal. */
	if (!blob1 != !blob2)
		return false;

	if (!blob1)
		return true;

	lut_size1 = drm_color_lut_size(blob1);
	lut_size2 = drm_color_lut_size(blob2);

	/* check sw and hw lut size */
	if (lut_size1 != lut_size2)
		return false;

	lut1 = blob1->data;
	lut2 = blob2->data;

	/* Tolerance: everything below the hardware's stored precision. */
	err = 0xffff >> bit_precision;

	/* check sw and hw lut entry to be equal */
	switch (gamma_mode & GAMMA_MODE_MODE_MASK) {
	case GAMMA_MODE_MODE_8BIT:
	case GAMMA_MODE_MODE_10BIT:
		if (!intel_color_lut_entries_equal(lut1, lut2,
						   lut_size2, err))
			return false;
		break;
	case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
		if (!intel_color_lut_entries_equal(lut1, lut2,
						   9, err))
			return false;
		break;
	default:
		MISSING_CASE(gamma_mode);
		return false;
	}

	return true;
}

/* Read the legacy 8bit palette back into a new LUT blob (i9xx). */
static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	struct drm_property_blob *blob;
	struct drm_color_lut *lut;
	int i;

	blob = drm_property_create_blob(&dev_priv->drm,
					sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
					NULL);
	if (IS_ERR(blob))
		return NULL;

	lut = blob->data;

	for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
		u32 val = intel_de_read(dev_priv, PALETTE(pipe, i));

		i9xx_lut_8_pack(&lut[i], val);
	}

	return blob;
}

/* Hardware LUT readout for i9xx. */
static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->gamma_enable)
		return;

	crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
}

/* Read the i965 10.6 interpolated gamma back into a new LUT blob. */
static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
	enum pipe pipe = crtc->pipe;
	struct drm_property_blob *blob;
	struct drm_color_lut *lut;

	blob = drm_property_create_blob(&dev_priv->drm,
					sizeof(struct drm_color_lut) * lut_size,
					NULL);
	if (IS_ERR(blob))
		return NULL;

	lut = blob->data;

	/* Each entry occupies two PALETTE dwords (even/odd). */
	for (i = 0; i < lut_size - 1; i++) {
		u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0));
		u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1));

		i965_lut_10p6_pack(&lut[i], ldw, udw);
	}

	/* The final entry lives in the PIPEGCMAX registers. */
	lut[i].red =
		i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0)));
	lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1)));
	lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2)));

	return blob;
}

/* Hardware LUT readout for i965/g4x. */
static void i965_read_luts(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->gamma_enable)
		return;

	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
		crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
	else
		crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc);
}

/* Read the CHV CGM gamma registers back into a new LUT blob. */
static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
	enum pipe pipe = crtc->pipe;
	struct drm_property_blob *blob;
	struct drm_color_lut *lut;

	blob = drm_property_create_blob(&dev_priv->drm,
					sizeof(struct drm_color_lut) * lut_size,
					NULL);
	if (IS_ERR(blob))
		return NULL;

	lut = blob->data;

	for (i = 0; i < lut_size; i++) {
		u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
		u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));

		chv_cgm_gamma_pack(&lut[i], ldw, udw);
	}

	return blob;
}

/* Hardware LUT readout for CHV (CGM gamma or legacy fallback). */
static void chv_read_luts(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
		crtc_state->hw.gamma_lut = chv_read_cgm_gamma(crtc);
	else
		i965_read_luts(crtc_state);
}

/* Read the ILK legacy 8bit palette back into a new LUT blob. */
static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	struct drm_property_blob *blob;
	struct drm_color_lut *lut;
	int i;

	blob = drm_property_create_blob(&dev_priv->drm,
					sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
					NULL);
	if (IS_ERR(blob))
		return NULL;

	lut = blob->data;

	for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
		u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i));

		i9xx_lut_8_pack(&lut[i], val);
	}

	return blob;
}

/* Read the ILK 10bit precision palette back into a new LUT blob. */
static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
	enum pipe pipe = crtc->pipe;
	struct drm_property_blob *blob;
	struct drm_color_lut *lut;

	blob = drm_property_create_blob(&dev_priv->drm,
					sizeof(struct drm_color_lut) * lut_size,
					NULL);
	if (IS_ERR(blob))
		return NULL;

	lut = blob->data;

	for (i = 0; i < lut_size; i++) {
		u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i));

		ilk_lut_10_pack(&lut[i], val);
	}

	return blob;
}

/* Hardware LUT readout for ILK/SNB. */
static void ilk_read_luts(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->gamma_enable)
		return;

	/* Readout is only implemented for the CSC-before-gamma case. */
	if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
		return;

	switch (crtc_state->gamma_mode) {
	case GAMMA_MODE_MODE_8BIT:
		crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
		break;
	case GAMMA_MODE_MODE_10BIT:
		crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc);
		break;
	default:
		MISSING_CASE(crtc_state->gamma_mode);
		break;
	}
}

/* On BDW+ the index auto increment mode actually works */
static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
						 u32 prec_index)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int i, hw_lut_size
= ivb_lut_10_size(prec_index); 1986 int lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; 1987 enum pipe pipe = crtc->pipe; 1988 struct drm_property_blob *blob; 1989 struct drm_color_lut *lut; 1990 1991 drm_WARN_ON(&dev_priv->drm, lut_size != hw_lut_size); 1992 1993 blob = drm_property_create_blob(&dev_priv->drm, 1994 sizeof(struct drm_color_lut) * lut_size, 1995 NULL); 1996 if (IS_ERR(blob)) 1997 return NULL; 1998 1999 lut = blob->data; 2000 2001 intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 2002 prec_index | PAL_PREC_AUTO_INCREMENT); 2003 2004 for (i = 0; i < lut_size; i++) { 2005 u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe)); 2006 2007 ilk_lut_10_pack(&lut[i], val); 2008 } 2009 2010 intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); 2011 2012 return blob; 2013 } 2014 2015 static void glk_read_luts(struct intel_crtc_state *crtc_state) 2016 { 2017 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2018 2019 if (!crtc_state->gamma_enable) 2020 return; 2021 2022 switch (crtc_state->gamma_mode) { 2023 case GAMMA_MODE_MODE_8BIT: 2024 crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc); 2025 break; 2026 case GAMMA_MODE_MODE_10BIT: 2027 crtc_state->hw.gamma_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); 2028 break; 2029 default: 2030 MISSING_CASE(crtc_state->gamma_mode); 2031 break; 2032 } 2033 } 2034 2035 static struct drm_property_blob * 2036 icl_read_lut_multi_segment(struct intel_crtc *crtc) 2037 { 2038 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2039 int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; 2040 enum pipe pipe = crtc->pipe; 2041 struct drm_property_blob *blob; 2042 struct drm_color_lut *lut; 2043 2044 blob = drm_property_create_blob(&dev_priv->drm, 2045 sizeof(struct drm_color_lut) * lut_size, 2046 NULL); 2047 if (IS_ERR(blob)) 2048 return NULL; 2049 2050 lut = blob->data; 2051 2052 intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 2053 PAL_PREC_AUTO_INCREMENT); 2054 2055 for (i = 0; i < 9; 
i++) { 2056 u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); 2057 u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); 2058 2059 icl_lut_multi_seg_pack(&lut[i], ldw, udw); 2060 } 2061 2062 intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0); 2063 2064 /* 2065 * FIXME readouts from PAL_PREC_DATA register aren't giving 2066 * correct values in the case of fine and coarse segments. 2067 * Restricting readouts only for super fine segment as of now. 2068 */ 2069 2070 return blob; 2071 } 2072 2073 static void icl_read_luts(struct intel_crtc_state *crtc_state) 2074 { 2075 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2076 2077 if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0) 2078 return; 2079 2080 switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) { 2081 case GAMMA_MODE_MODE_8BIT: 2082 crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc); 2083 break; 2084 case GAMMA_MODE_MODE_10BIT: 2085 crtc_state->hw.gamma_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); 2086 break; 2087 case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: 2088 crtc_state->hw.gamma_lut = icl_read_lut_multi_segment(crtc); 2089 break; 2090 default: 2091 MISSING_CASE(crtc_state->gamma_mode); 2092 break; 2093 } 2094 } 2095 2096 void intel_color_init(struct intel_crtc *crtc) 2097 { 2098 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2099 bool has_ctm = INTEL_INFO(dev_priv)->color.degamma_lut_size != 0; 2100 2101 drm_mode_crtc_set_gamma_size(&crtc->base, 256); 2102 2103 if (HAS_GMCH(dev_priv)) { 2104 if (IS_CHERRYVIEW(dev_priv)) { 2105 dev_priv->display.color_check = chv_color_check; 2106 dev_priv->display.color_commit = i9xx_color_commit; 2107 dev_priv->display.load_luts = chv_load_luts; 2108 dev_priv->display.read_luts = chv_read_luts; 2109 } else if (DISPLAY_VER(dev_priv) >= 4) { 2110 dev_priv->display.color_check = i9xx_color_check; 2111 dev_priv->display.color_commit = i9xx_color_commit; 2112 dev_priv->display.load_luts = 
i965_load_luts; 2113 dev_priv->display.read_luts = i965_read_luts; 2114 } else { 2115 dev_priv->display.color_check = i9xx_color_check; 2116 dev_priv->display.color_commit = i9xx_color_commit; 2117 dev_priv->display.load_luts = i9xx_load_luts; 2118 dev_priv->display.read_luts = i9xx_read_luts; 2119 } 2120 } else { 2121 if (DISPLAY_VER(dev_priv) >= 11) 2122 dev_priv->display.color_check = icl_color_check; 2123 else if (DISPLAY_VER(dev_priv) >= 10) 2124 dev_priv->display.color_check = glk_color_check; 2125 else if (DISPLAY_VER(dev_priv) >= 7) 2126 dev_priv->display.color_check = ivb_color_check; 2127 else 2128 dev_priv->display.color_check = ilk_color_check; 2129 2130 if (DISPLAY_VER(dev_priv) >= 9) 2131 dev_priv->display.color_commit = skl_color_commit; 2132 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2133 dev_priv->display.color_commit = hsw_color_commit; 2134 else 2135 dev_priv->display.color_commit = ilk_color_commit; 2136 2137 if (DISPLAY_VER(dev_priv) >= 11) { 2138 dev_priv->display.load_luts = icl_load_luts; 2139 dev_priv->display.read_luts = icl_read_luts; 2140 } else if (DISPLAY_VER(dev_priv) == 10) { 2141 dev_priv->display.load_luts = glk_load_luts; 2142 dev_priv->display.read_luts = glk_read_luts; 2143 } else if (DISPLAY_VER(dev_priv) >= 8) { 2144 dev_priv->display.load_luts = bdw_load_luts; 2145 } else if (DISPLAY_VER(dev_priv) >= 7) { 2146 dev_priv->display.load_luts = ivb_load_luts; 2147 } else { 2148 dev_priv->display.load_luts = ilk_load_luts; 2149 dev_priv->display.read_luts = ilk_read_luts; 2150 } 2151 } 2152 2153 drm_crtc_enable_color_mgmt(&crtc->base, 2154 INTEL_INFO(dev_priv)->color.degamma_lut_size, 2155 has_ctm, 2156 INTEL_INFO(dev_priv)->color.gamma_lut_size); 2157 } 2158