/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "core_types.h"
#include "reg_helper.h"
#include "dcn30_dpp.h"
#include "basics/conversion.h"
#include "dcn30_cm_common.h"

#define REG(reg)\
	dpp->tf_regs->reg

#define CTX \
	dpp->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	dpp->tf_shift->field_name, dpp->tf_mask->field_name

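/*
 * Enable the color management (CM) block, or leave it in bypass when the
 * cm_in_bypass debug option is set.
 */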
static void dpp3_enable_cm_block(
		struct dpp *dpp_base)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	unsigned int cm_bypass_mode = 0;

	// debug option: put CM in bypass mode
	if (dpp_base->ctx->dc->debug.cm_in_bypass)
		cm_bypass_mode = 1;

	REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
}

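/*
 * Return which gamma-correction LUT the hardware is currently using:
 * bypass, RAM A or RAM B.
 */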
static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
{
	enum dc_lut_mode mode = LUT_BYPASS;
	uint32_t state_mode;
	uint32_t lut_mode;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_GET(CM_GAMCOR_CONTROL,
			CM_GAMCOR_MODE_CURRENT, &state_mode);

	if (state_mode == 2) { //Programmable RAM LUT
		REG_GET(CM_GAMCOR_CONTROL,
				CM_GAMCOR_SELECT_CURRENT, &lut_mode);

		if (lut_mode == 0)
			mode = LUT_RAM_A;
		else
			mode = LUT_RAM_B;
	}

	return mode;
}

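/*
 * Write the PWL base values into the gamma-correction LUT. When the R, G and
 * B curves are identical a single pass with all color channels enabled is
 * enough; otherwise the LUT is written one channel at a time using the write
 * color mask (4 = red, 2 = green, 1 = blue), resetting the LUT index between
 * passes.
 */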
static void dpp3_program_gammcor_lut(
		struct dpp *dpp_base,
		const struct pwl_result_data *rgb,
		uint32_t num,
		bool is_ram_a)
{
	uint32_t i;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
	uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
	uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;

	/*
	 * Fill in the LUT with all base values to be used by the pwl module.
	 * HW auto-increments the LUT index: back-to-back writes.
	 */
	if (is_rgb_equal(rgb, num)) {
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);

	} else {
		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);

		REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);

		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green);

		REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);

		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue);
	}
}

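/*
 * Power the gamma-correction LUT memory up or down. With the CM low-power
 * feature enabled, memory power is forced and the power state is polled on
 * power-up; otherwise the memory is simply enabled or disabled.
 */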
static void dpp3_power_on_gamcor_lut(
		struct dpp *dpp_base,
		bool power_on)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
		REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, power_on ? 0 : 3);
		if (power_on)
			REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5);
	} else {
		REG_SET(CM_MEM_PWR_CTRL, 0,
				GAMCOR_MEM_PWR_DIS, power_on ? 0 : 1);
	}
}

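/* Enable/disable de-alpha and set its additive blending option in the CM block. */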
void dpp3_program_cm_dealpha(
		struct dpp *dpp_base,
		uint32_t enable, uint32_t additive_blending)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_SET_2(CM_DEALPHA, 0,
			CM_DEALPHA_EN, enable,
			CM_DEALPHA_ABLND, additive_blending);
}

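/* Program the CM bias values: Cr/R in one register, Y/G and Cb/B in another. */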
void dpp3_program_cm_bias(
	struct dpp *dpp_base,
	struct CM_bias_params *bias_params)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r);
	REG_SET_2(CM_BIAS_Y_G_CB_B, 0,
			CM_BIAS_Y_G, bias_params->cm_bias_y_g,
			CM_BIAS_CB_B, bias_params->cm_bias_cb_b);
}

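/*
 * Fill in the shift/mask pairs for the gamma-correction transfer function
 * registers; the RAMA shift/mask definitions apply to both the RAM A and
 * RAM B register sets.
 */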
static void dpp3_gamcor_reg_field(
		struct dcn3_dpp *dpp,
		struct dcn3_xfer_func_reg *reg)
{
	reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
	reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
	reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B;
	reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B;

	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
}

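/*
 * Select which LUT RAM (A or B) the host writes to, enable all color
 * channels for writing and reset the LUT index to 0.
 */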
static void dpp3_configure_gamcor_lut(
		struct dpp *dpp_base,
		bool is_ram_a)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
			CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7);
	REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
			CM_GAMCOR_LUT_HOST_SEL, is_ram_a ? 0 : 1);
	REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
}

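/*
 * Program the gamma-correction PWL. A NULL params pointer puts the block in
 * bypass (and powers the LUT memory down when CM low-power is enabled).
 * Otherwise the LUT RAM not currently in use is programmed and selected for
 * the next frame, double-buffering the update. Returns true when a LUT was
 * programmed, false when the block was left in bypass.
 */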
bool dpp3_program_gamcor_lut(
	struct dpp *dpp_base, const struct pwl_params *params)
{
	enum dc_lut_mode current_mode;
	enum dc_lut_mode next_mode;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	struct dcn3_xfer_func_reg gam_regs;

	dpp3_enable_cm_block(dpp_base);

	if (params == NULL) { //bypass if we have no pwl data
		REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0);
		if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
			dpp3_power_on_gamcor_lut(dpp_base, false);
		return false;
	}
	dpp3_power_on_gamcor_lut(dpp_base, true);
	REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2);

	current_mode = dpp30_get_gamcor_current(dpp_base);
	if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
		next_mode = LUT_RAM_B;
	else
		next_mode = LUT_RAM_A;

	dpp3_power_on_gamcor_lut(dpp_base, true);
	dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A);

	if (next_mode == LUT_RAM_B) {
		gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B);
		gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G);
		gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R);
		gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B);
		gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G);
		gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R);
		gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B);
		gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B);
		gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G);
		gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G);
		gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R);
		gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R);
		gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1);
		gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33);
		//New registers in DCN3AG/DCN GAMCOR block
		gam_regs.offset_b = REG(CM_GAMCOR_RAMB_OFFSET_B);
		gam_regs.offset_g = REG(CM_GAMCOR_RAMB_OFFSET_G);
		gam_regs.offset_r = REG(CM_GAMCOR_RAMB_OFFSET_R);
		gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B);
		gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G);
		gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R);
	} else {
		gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B);
		gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G);
		gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R);
		gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B);
		gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G);
		gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R);
		gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B);
		gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B);
		gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G);
		gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G);
		gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R);
		gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R);
		gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1);
		gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33);
		//New registers in DCN3AG/DCN GAMCOR block
		gam_regs.offset_b = REG(CM_GAMCOR_RAMA_OFFSET_B);
		gam_regs.offset_g = REG(CM_GAMCOR_RAMA_OFFSET_G);
		gam_regs.offset_r = REG(CM_GAMCOR_RAMA_OFFSET_R);
		gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B);
		gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G);
		gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R);
	}

	//get register fields
	dpp3_gamcor_reg_field(dpp, &gam_regs);

	//program register set for LUTA/LUTB
	cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs);

	dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num,
			next_mode == LUT_RAM_A);

	//select Gamma LUT to use for next frame
	REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0 : 1);

	return true;
}

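/* Program the HDR multiplier coefficient applied in the CM block. */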
void dpp3_set_hdr_multiplier(
		struct dpp *dpp_base,
		uint32_t multiplier)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}

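/*
 * Program gamut remap coefficients into set A or set B, or put the remap in
 * bypass when no coefficients are provided, then select the coefficient set
 * to use.
 */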
static void program_gamut_remap(
		struct dcn3_dpp *dpp,
		const uint16_t *regval,
		int select)
{
	uint16_t selection = 0;
	struct color_matrices_reg gam_regs;

	if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
				CM_GAMUT_REMAP_MODE, 0);
		return;
	}
	switch (select) {
	case GAMUT_REMAP_COEFF:
		selection = 1;
		break;
	/*
	 * This corresponds to GAMUT_REMAP coefficient set B;
	 * we don't have common coefficient sets in dcn3ag/dcn3.
	 */
	case GAMUT_REMAP_COMA_COEFF:
		selection = 2;
		break;
	default:
		break;
	}

	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;

	if (select == GAMUT_REMAP_COEFF) {
		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	} else if (select == GAMUT_REMAP_COMA_COEFF) {
		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);
	}

	//select coefficient set to use
	REG_SET(
			CM_GAMUT_REMAP_CONTROL, 0,
			CM_GAMUT_REMAP_MODE, selection);
}

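/*
 * Apply a software gamut remap adjustment: convert the 3x4 temperature
 * matrix to register format and program it, or bypass the remap for
 * non-SW adjustment types.
 */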
void dpp3_cm_set_gamut_remap(
	struct dpp *dpp_base,
	const struct dpp_grph_csc_adjustment *adjust)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	int i = 0;
	int gamut_mode;

	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) {
		/* Bypass if type is bypass or hw */
		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
	} else {
		struct fixed31_32 arr_matrix[12];
		uint16_t arr_reg_val[12];

		for (i = 0; i < 12; i++)
			arr_matrix[i] = adjust->temperature_matrix[i];

		convert_float_matrix(
			arr_reg_val, arr_matrix, 12);

		//read the coefficient set currently in use
		REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode);

		//pick the coefficient set to use next (alternate between A and B)
		if (gamut_mode == 0)
			gamut_mode = 1; //use coefficient set A
		else if (gamut_mode == 1)
			gamut_mode = 2; //use coefficient set B
		else
			gamut_mode = 1;

		/*
		 * Follow the dcn2 approach for now: gamut_mode is not used yet
		 * and only coefficient set A is programmed.
		 */
		program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
	}
}