1 /*
2  * Copyright 2020 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include "dm_services.h"
27 #include "core_types.h"
28 #include "reg_helper.h"
29 #include "dcn30_dpp.h"
30 #include "basics/conversion.h"
31 #include "dcn30_cm_common.h"
32 
33 #define REG(reg)\
34 	dpp->tf_regs->reg
35 
36 #define CTX \
37 	dpp->base.ctx
38 
39 #undef FN
40 #define FN(reg_name, field_name) \
41 	dpp->tf_shift->field_name, dpp->tf_mask->field_name
42 
43 static void dpp3_enable_cm_block(
44 		struct dpp *dpp_base)
45 {
46 	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
47 
48 	unsigned int cm_bypass_mode = 0;
49 
50 	// debug option: put CM in bypass mode
51 	if (dpp_base->ctx->dc->debug.cm_in_bypass)
52 		cm_bypass_mode = 1;
53 
54 	REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
55 }
56 
57 static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
58 {
59 	enum dc_lut_mode mode;
60 	uint32_t state_mode;
61 	uint32_t lut_mode;
62 	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
63 
64 	REG_GET(CM_GAMCOR_CONTROL,
65 			CM_GAMCOR_MODE_CURRENT, &state_mode);
66 
67 		if (state_mode == 0)
68 			mode = LUT_BYPASS;
69 
70 		if (state_mode == 2) {//Programmable RAM LUT
71 			REG_GET(CM_GAMCOR_CONTROL,
72 					CM_GAMCOR_SELECT_CURRENT, &lut_mode);
73 
74 			if (lut_mode == 0)
75 				mode = LUT_RAM_A;
76 			else
77 				mode = LUT_RAM_B;
78 		}
79 
80 		return mode;
81 }
82 
83 static void dpp3_program_gammcor_lut(
84 		struct dpp *dpp_base,
85 		const struct pwl_result_data *rgb,
86 		uint32_t num,
87 		bool is_ram_a)
88 {
89 	uint32_t i;
90 	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
91 	uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
92 	uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
93 	uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
94 
95 	/*fill in the LUT with all base values to be used by pwl module
96 	 * HW auto increments the LUT index: back-to-back write
97 	 */
98 	if (is_rgb_equal(rgb,  num)) {
99 		for (i = 0 ; i < num; i++)
100 			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);
101 
102 		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);
103 
104 	} else {
105 		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
106 				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4);
107 		for (i = 0 ; i < num; i++)
108 			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);
109 
110 		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);
111 
112 		REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
113 
114 		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
115 				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2);
116 		for (i = 0 ; i < num; i++)
117 			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg);
118 
119 		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green);
120 
121 		REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
122 
123 		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
124 				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1);
125 		for (i = 0 ; i < num; i++)
126 			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg);
127 
128 		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue);
129 	}
130 }
131 
132 static void dpp3_power_on_gamcor_lut(
133 		struct dpp *dpp_base,
134 	bool power_on)
135 {
136 	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
137 
138 	if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
139 		if (power_on) {
140 			REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0);
141 			REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5);
142 		} else {
143 			dpp_base->ctx->dc->optimized_required = true;
144 			dpp_base->deferred_reg_writes.bits.disable_gamcor = true;
145 		}
146 	} else
147 		REG_SET(CM_MEM_PWR_CTRL, 0,
148 				GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1);
149 }
150 
/*
 * Program the color-management de-alpha control register.
 *
 * @enable:             written to CM_DEALPHA_EN
 * @additive_blending:  written to CM_DEALPHA_ABLND
 */
void dpp3_program_cm_dealpha(
		struct dpp *dpp_base,
	uint32_t enable, uint32_t additive_blending)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_SET_2(CM_DEALPHA, 0,
			CM_DEALPHA_EN, enable,
			CM_DEALPHA_ABLND, additive_blending);
}
161 
/*
 * Program the color-management bias values (Cr/R and Y-G/Cb-B pairs)
 * from the caller-supplied parameter block.
 */
void dpp3_program_cm_bias(
	struct dpp *dpp_base,
	struct CM_bias_params *bias_params)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r);
	REG_SET_2(CM_BIAS_Y_G_CB_B, 0,
			CM_BIAS_Y_G, bias_params->cm_bias_y_g,
			CM_BIAS_CB_B, bias_params->cm_bias_cb_b);
}
173 
/*
 * Populate the generic transfer-function register descriptor with the
 * GAMCOR-specific shift/mask values, so the cm_helper_* routines can
 * program either RAM bank through a common code path.
 *
 * Only the blue-channel (_B) shifts/masks are captured; the helper
 * presumably applies the same layout to all channels - the RAMA/RAMB
 * register offsets themselves are filled in by the caller.
 */
static void dpp3_gamcor_reg_field(
		struct dcn3_dpp *dpp,
		struct dcn3_xfer_func_reg *reg)
{

	reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
	reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
	reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B;
	reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B;

	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B;
	/* "exp_resion" typo is in the shared struct declared elsewhere;
	 * cannot be renamed from this file alone.
	 */
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
}
206 
207 static void dpp3_configure_gamcor_lut(
208 		struct dpp *dpp_base,
209 		bool is_ram_a)
210 {
211 	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
212 
213 	REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
214 			CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7);
215 	REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
216 			CM_GAMCOR_LUT_HOST_SEL, is_ram_a == true ? 0:1);
217 	REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
218 }
219 
220 
/*
 * Program the gamma-correction (GAMCOR) transfer function from PWL params.
 *
 * Double-buffered: the bank NOT currently in use (RAM A/B) is selected,
 * its region/segment registers and LUT contents are written, and finally
 * CM_GAMCOR_SELECT flips the hardware to the freshly written bank for the
 * next frame.
 *
 * Returns false (and leaves GAMCOR in bypass, RAM powered down when the
 * low-power debug option is set) if params is NULL; true otherwise.
 */
bool dpp3_program_gamcor_lut(
	struct dpp *dpp_base, const struct pwl_params *params)
{
	enum dc_lut_mode current_mode;
	enum dc_lut_mode next_mode;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	struct dcn3_xfer_func_reg gam_regs;

	dpp3_enable_cm_block(dpp_base);

	if (params == NULL) { //bypass if we have no pwl data
		REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0);
		/* keep the LUT RAM powered down while bypassed */
		if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
			dpp3_power_on_gamcor_lut(dpp_base, false);
		return false;
	}
	dpp3_power_on_gamcor_lut(dpp_base, true);
	REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2);

	/* pick the bank the HW is not reading from */
	current_mode = dpp30_get_gamcor_current(dpp_base);
	if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
		next_mode = LUT_RAM_B;
	else
		next_mode = LUT_RAM_A;

	/* NOTE(review): this second power-on call looks redundant with the
	 * one above - confirm whether it is required by HW sequencing.
	 */
	dpp3_power_on_gamcor_lut(dpp_base, true);
	dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A);

	/* collect the register offsets for the bank being programmed */
	if (next_mode == LUT_RAM_B) {
		gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B);
		gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G);
		gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R);
		gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B);
		gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G);
		gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R);
		gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B);
		gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B);
		gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G);
		gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G);
		gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R);
		gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R);
		gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1);
		gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33);
		//New registers in DCN3AG/DCN GAMCOR block
		gam_regs.offset_b =  REG(CM_GAMCOR_RAMB_OFFSET_B);
		gam_regs.offset_g =  REG(CM_GAMCOR_RAMB_OFFSET_G);
		gam_regs.offset_r =  REG(CM_GAMCOR_RAMB_OFFSET_R);
		gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B);
		gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G);
		gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R);
	} else {
		gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B);
		gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G);
		gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R);
		gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B);
		gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G);
		gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R);
		gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B);
		gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B);
		gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G);
		gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G);
		gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R);
		gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R);
		gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1);
		gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33);
		//New registers in DCN3AG/DCN GAMCOR block
		gam_regs.offset_b =  REG(CM_GAMCOR_RAMA_OFFSET_B);
		gam_regs.offset_g =  REG(CM_GAMCOR_RAMA_OFFSET_G);
		gam_regs.offset_r =  REG(CM_GAMCOR_RAMA_OFFSET_R);
		gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B);
		gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G);
		gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R);
	}

	//get register fields
	dpp3_gamcor_reg_field(dpp, &gam_regs);

	//program register set for LUTA/LUTB
	cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs);

	/* then write the LUT data itself */
	dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num,
				 next_mode == LUT_RAM_A);

	//select Gamma LUT to use for next frame
	REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1);

	return true;
}
309 
/*
 * Program the HDR multiplier coefficient applied by the CM block.
 * 'multiplier' is written as-is to CM_HDR_MULT_COEF (fixed-point format
 * defined by HW; presumably 1.0 when no HDR boost - confirm with callers).
 */
void dpp3_set_hdr_multiplier(
		struct dpp *dpp_base,
		uint32_t multiplier)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}
318 
319 
320 static void program_gamut_remap(
321 		struct dcn3_dpp *dpp,
322 		const uint16_t *regval,
323 		int select)
324 {
325 	uint16_t selection = 0;
326 	struct color_matrices_reg gam_regs;
327 
328 	if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
329 		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
330 				CM_GAMUT_REMAP_MODE, 0);
331 		return;
332 	}
333 	switch (select) {
334 	case GAMUT_REMAP_COEFF:
335 		selection = 1;
336 		break;
337 		/*this corresponds to GAMUT_REMAP coefficients set B
338 		 *we don't have common coefficient sets in dcn3ag/dcn3
339 		 */
340 	case GAMUT_REMAP_COMA_COEFF:
341 		selection = 2;
342 		break;
343 	default:
344 		break;
345 	}
346 
347 	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
348 	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
349 	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
350 	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
351 
352 
353 	if (select == GAMUT_REMAP_COEFF) {
354 		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
355 		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
356 
357 		cm_helper_program_color_matrices(
358 				dpp->base.ctx,
359 				regval,
360 				&gam_regs);
361 
362 	} else  if (select == GAMUT_REMAP_COMA_COEFF) {
363 
364 		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
365 		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
366 
367 		cm_helper_program_color_matrices(
368 				dpp->base.ctx,
369 				regval,
370 				&gam_regs);
371 
372 	}
373 	//select coefficient set to use
374 	REG_SET(
375 			CM_GAMUT_REMAP_CONTROL, 0,
376 			CM_GAMUT_REMAP_MODE, selection);
377 }
378 
379 void dpp3_cm_set_gamut_remap(
380 	struct dpp *dpp_base,
381 	const struct dpp_grph_csc_adjustment *adjust)
382 {
383 	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
384 	int i = 0;
385 	int gamut_mode;
386 
387 	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
388 		/* Bypass if type is bypass or hw */
389 		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
390 	else {
391 		struct fixed31_32 arr_matrix[12];
392 		uint16_t arr_reg_val[12];
393 
394 		for (i = 0; i < 12; i++)
395 			arr_matrix[i] = adjust->temperature_matrix[i];
396 
397 		convert_float_matrix(
398 			arr_reg_val, arr_matrix, 12);
399 
400 		//current coefficient set in use
401 		REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode);
402 
403 		if (gamut_mode == 0)
404 			gamut_mode = 1; //use coefficient set A
405 		else if (gamut_mode == 1)
406 			gamut_mode = 2;
407 		else
408 			gamut_mode = 1;
409 
410 		//follow dcn2 approach for now - using only coefficient set A
411 		program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
412 	}
413 }
414